generated by bibbase.org
Excellent! Next you can create a new website with this list, or embed it in an existing web page by copying & pasting any of the following snippets.

JavaScript (easiest):

<script src="https://bibbase.org/show?bib=https://kbsg.rwth-aachen.de/~schiffer/schiffer.bib&jsonp=1"></script>

PHP:

<?php
$contents = file_get_contents("https://bibbase.org/show?bib=https://kbsg.rwth-aachen.de/~schiffer/schiffer.bib&jsonp=1");
print_r($contents);
?>

iFrame (not recommended):

<iframe src="https://bibbase.org/show?bib=https://kbsg.rwth-aachen.de/~schiffer/schiffer.bib&jsonp=1"></iframe>

For more details see the documentation.
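The same rendered list can also be fetched from any language with HTTP support. As an illustrative sketch only (this variant is not among the official snippets above, and it assumes the third-party requests package):

import requests

# BibBase URL as used in the snippets above; jsonp=1 requests the rendered list.
BIB_URL = ("https://bibbase.org/show"
           "?bib=https://kbsg.rwth-aachen.de/~schiffer/schiffer.bib&jsonp=1")

response = requests.get(BIB_URL, timeout=30)
response.raise_for_status()   # fail loudly on HTTP errors
print(response.text)          # the rendered publication list, ready to embed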
2024 (1)
Aachener Kompetenzzentrum für interaktive Robotik in Gesundheit, Pflege und Assistenz (AixistenzRobotik).
Rosenthal-von der Pütten, A.; Mertens, A.; Stephan, A.; Schiffer, S.; Abrams, A.; Kopp, S.; and Nitsch, V.
In Rönnau, A.; Becker, P.; and Behnke, S., editor(s), Roboter für Assistenzfunktionen: Konzeptstudien für die Interaktion in der Praxis, pages 71–122. KIT Scientific Publishing, 2024.

@incollection{Rosenthal:etAl_RoboAssi2024_AixistenzRobotik,
  author       = {Astrid {Rosenthal{-}von der P{\"u}tten} and Alexander Mertens and
                  Astrid Stephan and Stefan Schiffer and Anna Abrams and
                  Stefan Kopp and Verena Nitsch},
  title        = {{Aachener Kompetenzzentrum f{\"u}r interaktive Robotik in
                  Gesundheit, Pflege und Assistenz (AixistenzRobotik)}},
  booktitle    = {Roboter f{\"u}r Assistenzfunktionen: Konzeptstudien f{\"u}r
                  die Interaktion in der Praxis},
  editor       = {R{\"o}nnau, Arne and Becker, Pascal and Behnke, Sven},
  doi          = {10.58895/ksp/1000151552-2},
  doi_book     = {10.58895/ksp/1000151552},
  year         = {2024},
  publisher    = {{KIT Scientific Publishing}},
  url_PDF      = {https://www.ksp.kit.edu/site/chapters/e/10.58895/ksp/1000151552-2/},
  isbn         = {978-3-7315-1244-8},
  pages        = {71--122},
  language     = {german},
}
2023 (15)
BUSSARD - Better Understanding Social Situations for Autonomous Robot Decision-Making.
Schiffer, S.; Rosenthal-von der Pütten, A. M.; and Leibe, B.
CoRR - Trust, Acceptance and Social Cues in Human-Robot Interaction (SCRITA) Workshop held in conjunction with the IEEE International Conference on Robot and Human Interactive Communication (RO-MAN 2023), abs/2311.06391. 2023.

@article{Schiffer-etAl_RO-MAN2023SCRITA-WS_BUSSARD,
  author       = {Stefan Schiffer and Astrid M. {Rosenthal{-}von der P{\"{u}}tten} and Bastian Leibe},
  title        = {{BUSSARD} - Better Understanding Social Situations for Autonomous Robot Decision-Making},
  journal      = {CoRR - Trust, Acceptance and Social Cues in Human-Robot Interaction (SCRITA)
                  Workshop held in conjunction with the IEEE International Conference
                  on Robot and Human Interactive Communication (RO-MAN 2023)},
  volume       = {abs/2311.06391},
  year         = {2023},
  url          = {https://doi.org/10.48550/arXiv.2311.06391},
  doi          = {10.48550/ARXIV.2311.06391},
  eprinttype   = {arXiv},
  eprint       = {2311.06391},
  biburl       = {https://dblp.org/rec/journals/corr/abs-2311-06391.bib},
  key_DBLP     = {DBLP:journals/corr/abs-2311-06391},
  url_Workshop = {https://scrita.herts.ac.uk/2023/},
  abstract     = {We report on our effort to create a corpus dataset of
                  different social context situations in an office setting
                  for further disciplinary and interdisciplinary research in
                  computer vision, psychology, and human-robot-interaction.
                  For social robots to be able to behave appropriately, they
                  need to be aware of the social context they act in.
                  Consider, for example, a robot with the task to deliver a
                  personal message to a person. If the person is arguing with
                  an office mate at the time of message delivery, it might be
                  more appropriate to delay playing the message as to respect
                  the recipient's privacy and not to interfere with the
                  current situation. This can only be done if the situation
                  is classified correctly and in a second step if an
                  appropriate behavior is chosen that fits the social
                  situation. Our work aims to enable robots accomplishing the
                  task of classifying social situations by creating a dataset
                  composed of semantically annotated video scenes of office
                  situations from television soap operas. The dataset can
                  then serve as a basis for conducting research in both
                  computer vision and human-robot interaction.},
}

We report on our effort to create a corpus dataset of different social context situations in an office setting for further disciplinary and interdisciplinary research in computer vision, psychology, and human-robot-interaction. For social robots to be able to behave appropriately, they need to be aware of the social context they act in. Consider, for example, a robot with the task to deliver a personal message to a person. If the person is arguing with an office mate at the time of message delivery, it might be more appropriate to delay playing the message so as to respect the recipient's privacy and not to interfere with the current situation. This can only be done if the situation is classified correctly and in a second step if an appropriate behavior is chosen that fits the social situation. Our work aims to enable robots accomplishing the task of classifying social situations by creating a dataset composed of semantically annotated video scenes of office situations from television soap operas. The dataset can then serve as a basis for conducting research in both computer vision and human-robot interaction.
Potential Ways to Detect Unfairness in HRI and to Re-establish Positive Group Dynamics.
Rosenthal-von der Pütten, A. M.; and Schiffer, S.
CoRR - Researching Diversity and Inclusion in Human-Robot Interaction: Methodological, technical and ethical considerations (divHRI) Workshop held in conjunction with the IEEE International Conference on Robot and Human Interactive Communication (RO-MAN 2023), abs/2310.01574. 2023.

@article{Rosenthal-vdP:Schiffer_RO-MAN2023divHRI-WS_PotentialWays,
  author       = {Astrid M. {Rosenthal{-}von der P{\"{u}}tten} and Stefan Schiffer},
  title        = {Potential Ways to Detect Unfairness in {HRI} and to Re-establish Positive Group Dynamics},
  journal      = {CoRR - Researching Diversity and Inclusion in Human-Robot Interaction:
                  Methodological, technical and ethical considerations (divHRI)
                  Workshop held in conjunction with the IEEE International Conference
                  on Robot and Human Interactive Communication (RO-MAN 2023)},
  volume       = {abs/2310.01574},
  year         = {2023},
  url          = {https://doi.org/10.48550/arXiv.2310.01574},
  doi          = {10.48550/ARXIV.2310.01574},
  eprinttype   = {arXiv},
  eprint       = {2310.01574},
  biburl       = {https://dblp.org/rec/journals/corr/abs-2310-01574.bib},
  key_DBLP     = {DBLP:journals/corr/abs-2310-01574},
  url_Workshop = {https://sites.google.com/view/divhri23/},
  abstract     = {This paper focuses on the identification of different
                  algorithm-based biases in robotic behaviour and their
                  consequences in human-robot mixed groups. We propose to
                  develop computational models to detect episodes of
                  microaggression, discrimination, and social exclusion
                  informed by a) observing human coping behaviours that are
                  used to regain social inclusion and b) using system
                  inherent information that reveal unequal treatment of
                  human interactants. Based on this information we can start
                  to develop regulatory mechanisms to promote fairness and
                  social inclusion in HRI.},
}

This paper focuses on the identification of different algorithm-based biases in robotic behaviour and their consequences in human-robot mixed groups. We propose to develop computational models to detect episodes of microaggression, discrimination, and social exclusion informed by a) observing human coping behaviours that are used to regain social inclusion and b) using system inherent information that reveals unequal treatment of human interactants. Based on this information we can start to develop regulatory mechanisms to promote fairness and social inclusion in HRI.
Controlling a Fleet of Autonomous LHD Vehicles in Mining Operation.
Ferrein, A.; Nikolovski, G.; Limpert, N.; Reke, M.; Schiffer, S.; and Scholl, I.
In Küçük, S., editor(s), Multi-Robot Systems - New Advances, chapter 4. IntechOpen, Rijeka, 2023.

@incollection{Ferrein:etAl_INTECH2023_Controlling-a-Fleet,
  author       = {Alexander Ferrein and Gjorgji Nikolovski and Nicolas Limpert
                  and Michael Reke and Stefan Schiffer and Ingrid Scholl},
  title        = {{Controlling a Fleet of Autonomous LHD Vehicles in Mining Operation}},
  booktitle    = {Multi-Robot Systems - New Advances},
  publisher    = {IntechOpen},
  address      = {Rijeka},
  year         = {2023},
  editor       = {Serdar K{\"u}{\c{c}}{\"u}k},
  chapter      = {4},
  doi          = {10.5772/intechopen.113044},
  url          = {https://doi.org/10.5772/intechopen.113044},
  url_intech   = {https://www.intechopen.com/chapters/88580},
  abstract     = {In this chapter, we report on our activities to create and
                  maintain a fleet of autonomous load haul dump (LHD)
                  vehicles for mining operations. The ever increasing demand
                  for sustainable solutions and economic pressure causes
                  innovation in the mining industry just like in any other
                  branch. In this chapter, we present our approach to create
                  a fleet of autonomous special purpose vehicles and to
                  control these vehicles in mining operations. After an
                  initial exploration of the site we deploy the fleet. Every
                  vehicle is running an instance of our ROS 2-based
                  architecture. The fleet is then controlled with a dedicated
                  planning module. We also use continuous environment
                  monitoring to implement a life-long mapping approach. In
                  our experiments, we show that a combination of synthetic,
                  augmented and real training data improves our classifier
                  based on the deep learning network Yolo v5 to detect our
                  vehicles, persons and navigation beacons. The classifier
                  was successfully installed on the NVidia AGX-Drive
                  platform, so that the abovementioned objects can be
                  recognised during the dumper drive. The 3D poses of the
                  detected beacons are assigned to lanelets and transferred
                  to an existing map.},
}

In this chapter, we report on our activities to create and maintain a fleet of autonomous load haul dump (LHD) vehicles for mining operations. The ever increasing demand for sustainable solutions and economic pressure causes innovation in the mining industry just like in any other branch. In this chapter, we present our approach to create a fleet of autonomous special purpose vehicles and to control these vehicles in mining operations. After an initial exploration of the site we deploy the fleet. Every vehicle is running an instance of our ROS 2-based architecture. The fleet is then controlled with a dedicated planning module. We also use continuous environment monitoring to implement a life-long mapping approach. In our experiments, we show that a combination of synthetic, augmented and real training data improves our classifier based on the deep learning network Yolo v5 to detect our vehicles, persons and navigation beacons. The classifier was successfully installed on the NVidia AGX-Drive platform, so that the abovementioned objects can be recognised during the dumper drive. The 3D poses of the detected beacons are assigned to lanelets and transferred to an existing map.
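The chapter mentions a Yolo v5-based detector for vehicles, persons and navigation beacons. A minimal sketch of running such a detector, assuming the public ultralytics/yolov5 PyTorch Hub model; the chapter's actual weights are trained on mining data, and the image path below is a placeholder:

import torch

# Load a pretrained YOLOv5 model (generic COCO weights) from PyTorch Hub.
model = torch.hub.load("ultralytics/yolov5", "yolov5s")

results = model("site_image.jpg")   # hypothetical camera frame from the mine
results.print()                     # per-class detections with confidences
boxes = results.xyxy[0]             # tensor rows: [x1, y1, x2, y2, conf, class]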
Anomaly Detection in the Metal-Textile Industry for the Reduction of the Cognitive Load of Quality Control Workers.
Arndt, T.; Conzen, M.; Elsen, I.; Ferrein, A.; Galla, O.; Köse, H.; Schiffer, S.; and Tschesche, M.
In Proceedings of the 16th International Conference on PErvasive Technologies Related to Assistive Environments (PETRA '23), pages 535–542, New York, NY, USA, 2023. Association for Computing Machinery. Best Workshop Paper - Runner Up.

@inproceedings{Arndt:etAl_PETRA2023_AnomalyDetection,
  author       = {Arndt, Tobias and Conzen, Max and Elsen, Ingo and
                  Ferrein, Alexander and Galla, Oskar and K{\"o}se,
                  Hakan and Schiffer, Stefan and Tschesche, Matteo},
  title        = {{Anomaly Detection in the Metal-Textile Industry for the
                  Reduction of the Cognitive Load of Quality Control Workers}},
  year         = {2023},
  isbn         = {9798400700699},
  publisher    = {Association for Computing Machinery},
  address      = {New York, NY, USA},
  url          = {https://doi.org/10.1145/3594806.3596558},
  url_ACM_DL   = {https://dl.acm.org/doi/abs/10.1145/3594806.3596558},
  doi          = {10.1145/3594806.3596558},
  abstract     = {This paper presents an approach for reducing the cognitive
                  load for humans working in quality control (QC) for
                  production processes that adhere to the 6σ-methodology.
                  While 100\% QC requires every part to be inspected, this
                  task can be reduced when a human-in-the-loop QC process
                  gets supported by an anomaly detection system that only
                  presents those parts for manual inspection that have a
                  significant likelihood of being defective. This approach
                  shows good results when applied to image-based QC for
                  metal textile products.},
  booktitle    = {Proceedings of the 16th International Conference on PErvasive
                  Technologies Related to Assistive Environments},
  pages        = {535--542},
  numpages     = {8},
  keywords     = {anomaly detection, datasets, neural networks, process
                  optimization, quality control},
  location     = {Corfu, Greece},
  series       = {PETRA '23},
  note         = {Best Workshop Paper - Runner Up},
}

This paper presents an approach for reducing the cognitive load for humans working in quality control (QC) for production processes that adhere to the 6σ-methodology. While 100% QC requires every part to be inspected, this task can be reduced when a human-in-the-loop QC process gets supported by an anomaly detection system that only presents those parts for manual inspection that have a significant likelihood of being defective. This approach shows good results when applied to image-based QC for metal textile products.
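The routing idea described above (only suspicious parts go to manual inspection) can be sketched with any off-the-shelf anomaly detector. The IsolationForest below is a stand-in chosen for brevity, not the detector used in the paper, and the features are synthetic:

import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.default_rng(0)
good_parts = rng.normal(0.0, 1.0, size=(500, 8))   # stand-in for image features
detector = IsolationForest(contamination=0.01, random_state=0).fit(good_parts)

new_parts = rng.normal(0.0, 1.0, size=(20, 8))
flags = detector.predict(new_parts)                # -1 = anomalous, 1 = normal
to_inspect = np.where(flags == -1)[0]
print(f"{len(to_inspect)} of {len(new_parts)} parts routed to manual inspection")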
Kompetenzzentrum WIRKsam - Wirtschaftlicher Wandel in der rheinischen Textil- und Kohleregion mit Künstlicher Intelligenz gemeinsam gestalten.
Jeske, T.; Harlacher, M.; Altepost, A. A.; Schmenk, B.; Ferrein, A.; and Schiffer, S., editors.
Leistung & Entgelt, volume 2023, number 2. Joh. Heider Verlag GmbH, Bergisch Gladbach, June 2023.

@BOOK{Leistung-und-Entgelt_2024_WIRKsam,
  editor       = {Jeske, Tim and Harlacher, Markus and Altepost, Andrea Anna
                  and Schmenk, Bernhard and Ferrein, Alexander and Schiffer, Stefan},
  title        = {{K}ompetenzzentrum {WIRK}sam - {W}irtschaftlicher {W}andel in der rheinischen
                  {T}extil- und {K}ohleregion mit {K}{\"u}nstlicher {I}ntelligenz gemeinsam gestalten},
  journal      = {Leistung \& Entgelt},
  volume       = {2023},
  number       = {2},
  issn         = {2510-0424},
  address      = {Bergisch Gladbach},
  publisher    = {Joh. Heider Verlag GmbH},
  pages        = {46 Seiten : Illustrationen},
  year         = {2023},
  month        = {6},
  subtyp       = {Brochure},
  key_RWTH     = {962619},
  url_RWTH     = {https://publications.rwth-aachen.de/record/962619},
  url_ifaa     = {https://www.arbeitswissenschaft.net/angebote-produkte/broschueren/leistung-und-entgelt-kompetenzzentrum-wirksam},
  url_PDF_ifaa = {https://www.arbeitswissenschaft.net/fileadmin/user_upload/Klein_Ende_24211_LundE_2_2023_finale_Version_fuer_Druckerei.pdf},
  abstract     = {Das Kompetenzzentrum WIRKsam ist eines von acht regionalen
                  Kompetenzzentren der Arbeitsforschung mit Fokus auf der
                  Gestaltung neuer Arbeitsformen durch Künstliche
                  Intelligenz. Es hat seine regionale Verankerung im
                  Rheinischen Revier, das aufgrund des Kohleausstiegs von
                  einem starken Strukturwandel betroffen ist. Gleichzeitig
                  ist es Teil der Rheinischen Textilregion, die sich in den
                  letzten 50 Jahren stark verändert hat. Künstliche
                  Intelligenz bietet umfassende Möglichkeiten, die
                  Arbeitswelt mit innovativen Arbeits- und Prozessabläufen
                  zu gestalten und Produkte zu verbessern. Sie hilft
                  Unternehmen dabei, im globalen Wettbewerb zu bestehen und
                  Wohlstand und Arbeitsplätze zu sichern. Die Arbeiten im
                  Kompetenzzentrum WIRKsam zielen darauf ab, die Potenziale
                  von KI für die Unternehmen im Rheinischen Revier zu
                  erschließen. Der Kern der Forschungsaktivitäten liegt in
                  der prototypischen Entwicklung und Einführung von
                  KI-gestützten Systemen zur Unterstützung von Arbeit in
                  bislang neun Anwendungsunternehmen. So entstehen Beispiele
                  guter Praxis, die anderen Unternehmen Orientierung bieten
                  sollen. In dieser Ausgabe der "Leistung \& Entgelt" werden
                  das vom Bundesministerium für Bildung und Forschung
                  geförderte Projekt vorgestellt und seine bisher neun
                  Anwendungsfälle beschrieben.},
}

(Abstract, translated from German:) The WIRKsam competence centre is one of eight regional competence centres for research on work, focusing on shaping new forms of work with artificial intelligence. It is regionally anchored in the Rhenish mining district, which is undergoing profound structural change due to the coal phase-out. At the same time it is part of the Rhenish textile region, which has changed considerably over the last 50 years. Artificial intelligence offers far-reaching possibilities to shape the working world with innovative work and process flows and to improve products. It helps companies to hold their own in global competition and to secure prosperity and jobs. The work in the WIRKsam competence centre aims to unlock the potential of AI for companies in the Rhenish mining district. The core of the research activities lies in the prototypical development and introduction of AI-supported systems for supporting work in, so far, nine application companies. This yields examples of good practice that are meant to offer orientation to other companies. This issue of "Leistung & Entgelt" presents the project, which is funded by the German Federal Ministry of Education and Research, and describes its nine application cases to date.
WIRKsam : Projektvorstellung.
Jeske, T.; Harlacher, M.; Altepost, A. A.; Schmenk, B.; Ferrein, A.; and Schiffer, S.
Leistung & Entgelt, 2023(2): 7–12. 2023.

@ARTICLE{Jeske:etAl_LuE2023WIRKsam_Projektvorstellung,
      author       = {Jeske, Tim and Harlacher, Markus and Altepost, Andrea Anna
                      and Schmenk, Bernhard and Ferrein, Alexander and Schiffer,
                      Stefan},
      title        = {{WIRK}sam : {P}rojektvorstellung},
      journal      = {Leistung \& Entgelt},
      volume       = {2023},
      number       = {2},
      issn         = {2510-0424},
      address      = {Bergisch-Gladbach},
      publisher    = {Joh. Heider Verlag GmbH},
      reportid     = {RWTH-2023-07490},
      pages        = {7--12},
      year         = {2023},
      url_RWTH     = {https://publications.rwth-aachen.de/record/962599},
}
Multikriterielle KI-basierte Prozesssteuerung und Qualifizierung für Medizinprodukte.
Harlacher, M.; Neihues, S.; Hansen-Ampah, A. T.; Köse, H.; Schiffer, S.; Ferrein, A.; Rezaey, A.; and Dievernich, A.
Leistung & Entgelt, 2023(2): 16–18. 2023.

@ARTICLE{Harlacher:etAl_LuE2023WIRKsam_3-1_FEG,
      author       = {Harlacher, Markus and Neihues, Sina and Hansen-Ampah, Adjan
                      Troy and K{\"o}se, Hakan and Schiffer, Stefan and Ferrein,
                      Alexander and Rezaey, Arash and Dievernich, Axel},
      title        = {{M}ultikriterielle {KI}-basierte {P}rozesssteuerung und
                      {Q}ualifizierung f{\"u}r {M}edizinprodukte},
      journal      = {Leistung \& Entgelt},
      volume       = {2023},
      number       = {2},
      issn         = {2510-0424},
      address      = {Bergisch-Gladbach},
      publisher    = {Joh. Heider Verlag GmbH},
      reportid     = {RWTH-2023-07491},
      pages        = {16--18},
      year         = {2023},
      url_RWTH     = {https://publications.rwth-aachen.de/record/962600},
}
KI-Expertensystem für lernförderliche Empfehlungen zur maßgetreuen Produktion von 3D-Textilien mit digital unterstützter Eingangswerterfassung.
Harlacher, M.; Niehues, S.; Merx, W.; Roder, S.; Schiffer, S.; Ferrein, A.; Zohren, M.; and Rezaey, A.
Leistung & Entgelt, 2023(2): 19–21. 2023.

@ARTICLE{Harlacher:etAl_LuE2023WIRKsam_3-2_Essedea,
      author       = {Harlacher, Markus and Niehues, Sina and Merx, Wolfgang and
                      Roder, Simon and Schiffer, Stefan and Ferrein, Alexander and
                      Zohren, Marc and Rezaey, Arash},
      title        = {{KI}-{E}xpertensystem f{\"u}r lernf{\"o}rderliche {E}mpfehlungen
                      zur maßgetreuen {P}roduktion von 3{D}-{T}extilien mit
                      digital unterst{\"u}tzter {E}ingangswerterfassung},
      journal      = {Leistung \& Entgelt},
      volume       = {2023},
      number       = {2},
      issn         = {2510-0424},
      address      = {Bergisch-Gladbach},
      publisher    = {Joh. Heider Verlag GmbH},
      reportid     = {RWTH-2023-07492},
      pages        = {19--21},
      year         = {2023},
      url_RWTH     = {https://publications.rwth-aachen.de/record/962601},
}
KI-basierte Unterstützung der Kompetenz- und Fertigkeitsentwicklung für die Metallprofilbearbeitung.
Harlacher, A.; Niehues, S.; Hansen-Ampah, A. T.; Roder, S.; Schiffer, S.; Ferrein, A.; and Zenker, D.
Leistung & Entgelt, 2023(2): 22–24. 2023.

@ARTICLE{Harlacher:etAl_LuE2023WIRKsam_3-3_Heusch,
      author       = {Harlacher, Alexander and Niehues, Sina and Hansen-Ampah,
                      Adjan Troy and Roder, Simon and Schiffer, Stefan and
                      Ferrein, Alexander and Zenker, Dieter},
      title        = {{KI}-basierte {U}nterst{\"u}tzung der {K}ompetenz- und
                      {F}ertigkeitsentwicklung f{\"u}r die {M}etallprofilbearbeitung},
      journal      = {Leistung \& Entgelt},
      volume       = {2023},
      number       = {2},
      issn         = {2510-0424},
      address      = {Bergisch-Gladbach},
      publisher    = {Joh. Heider Verlag GmbH},
      reportid     = {RWTH-2023-07494},
      pages        = {22--24},
      year         = {2023},
      url_RWTH     = {https://publications.rwth-aachen.de/record/962604},
}
Lernförderliches KI-Varianzmanagement für die Produktion von Geweben mit kundenspezifisch veränderlich ausgeprägten Prüfmerkmalen.
Köse, H.; Schiffer, S.; Ferrein, A.; Ramm, G. M.; Harlacher, M.; Merx, W.; Zohren, M.; Rezaey, A.; Ernst, L.; and Ntzemos, E.
Leistung & Entgelt, 2023(2): 25–27. 2023.

@ARTICLE{Koese:etAl_LuE2023WIRKsam_4-1_AUNDE,
      author       = {K{\"o}se, Hakan and Schiffer, Stefan and Ferrein, Alexander
                      and Ramm, Gerda Maria and Harlacher, Markus and Merx,
                      Wolfgang and Zohren, Marc and Rezaey, Arash and Ernst, Leon
                      and Ntzemos, Emmanuil},
      title        = {{L}ernf{\"o}rderliches {KI}-{V}arianzmanagement f{\"u}r die
                      {P}roduktion von {G}eweben mit kundenspezifisch
                      ver{\"a}nderlich ausgepr{\"a}gten {P}r{\"u}fmerkmalen},
      journal      = {Leistung \& Entgelt},
      volume       = {2023},
      number       = {2},
      issn         = {2510-0424},
      address      = {Bergisch-Gladbach},
      publisher    = {Joh. Heider Verlag GmbH},
      reportid     = {RWTH-2023-07495},
      pages        = {25--27},
      year         = {2023},
      url_RWTH     = {https://publications.rwth-aachen.de/record/962605},
}
KI-Nachfrageprognose zur Verringerung von Lagerbeständen, Produktionsschwankungen und damit verbundener Beschäftigungsbelastung.
Tschesche, M.; Hennig, M.; Schiffer, S.; Ferrein, A.; Ramm, G. M.; Harlacher, M.; Merx, W.; Zohren, M.; Rezaey, A.; Kot, A.; and Smekal, J.
Leistung & Entgelt, 2023(2): 28–30. 2023.

@ARTICLE{Tschesche:etAl_LuE2023WIRKsam_4-2_R+F,
      author       = {Tschesche, Matteo and Hennig, Mike and Schiffer, Stefan and
                      Ferrein, Alexander and Ramm, Gerda Maria and Harlacher,
                      Markus and Merx, Wolfgang and Zohren, Marc and Rezaey, Arash
                      and Kot, Aylin and Smekal, J{\"u}rgen},
      title        = {{KI}-{N}achfrageprognose zur {V}erringerung von
                      {L}agerbest{\"a}nden, {P}roduktionsschwankungen und damit
                      verbundener {B}esch{\"a}ftigungsbelastung},
      journal      = {Leistung \& Entgelt},
      volume       = {2023},
      number       = {2},
      issn         = {2510-0424},
      address      = {Bergisch-Gladbach},
      publisher    = {Joh. Heider Verlag GmbH},
      reportid     = {RWTH-2023-07500},
      pages        = {28--30},
      year         = {2023},
      url_RWTH     = {https://publications.rwth-aachen.de/record/962613},
}
Situative KI-Entscheidungsunterstützung zur Abschätzung arbeitsorganisatorischer Folgen im Rahmen des Shopfloor Managements.
Tschesche, M.; Henning, M.; Schiffer, S.; Ferrein, A.; Ramm, G. M.; Harlacher, M.; Merx, W.; and Sahm, J.
Leistung & Entgelt, 2023(2): 31–33. 2023.

@ARTICLE{Tschesche:etAl_LuE2023WIRKsam_4-3_neusser-fb,
      author       = {Tschesche, Matteo and Henning, Mike and Schiffer, Stefan
                      and Ferrein, Alexander and Ramm, Gerda Maria and Harlacher,
                      Markus and Merx, Wolfgang and Sahm, Joachim},
      title        = {{S}ituative {KI}-{E}ntscheidungsunterst{\"u}tzung zur
                      {A}bsch{\"a}tzung arbeitsorganisatorischer {F}olgen im {R}ahmen
                      des {S}hopfloor {M}anagements},
      journal      = {Leistung \& Entgelt},
      volume       = {2023},
      number       = {2},
      issn         = {2510-0424},
      address      = {Bergisch-Gladbach},
      publisher    = {Joh. Heider Verlag GmbH},
      reportid     = {RWTH-2023-07501},
      pages        = {31--33},
      year         = {2023},
      url_RWTH     = {https://publications.rwth-aachen.de/record/962614},
}
KI-basierte Bildauswertung zur Qualitätsverbesserung und Entlastung von Beschäftigten bei der Herstellung von metallischen Filterprodukten.
Hansen-Ampah, A. T.; Boltersdorf, C. D.; Köse, H.; Schiffer, S.; Ferrein, A.; Shahinfar, F. N.; Ramm, G. M.; Zohren, M.; and Herper, D.
Leistung & Entgelt, 2023(2): 34–36. 2023.

@ARTICLE{HansenAmpah:etAl_LuE2023WIRKsam_5-1_GKD,
      author       = {Hansen-Ampah, Adjan Troy and Boltersdorf, Christian Daniel
                      and K{\"o}se, Hakan and Schiffer, Stefan and Ferrein, Alexander
                      and Shahinfar, Fatemeh N. and Ramm, Gerda Maria and Zohren,
                      Marc and Herper, Dominik},
      title        = {{KI}-basierte {B}ildauswertung zur {Q}ualit{\"a}tsverbesserung
                      und {E}ntlastung von {B}esch{\"a}ftigten bei der {H}erstellung
                      von metallischen {F}ilterprodukten},
      journal      = {Leistung \& Entgelt},
      volume       = {2023},
      number       = {2},
      issn         = {2510-0424},
      address      = {Bergisch-Gladbach},
      publisher    = {Joh. Heider Verlag GmbH},
      reportid     = {RWTH-2023-07503},
      pages        = {34--36},
      year         = {2023},
      url_RWTH     = {https://publications.rwth-aachen.de/record/962616},
}
Unterstützung von Produktentwicklung und Qualitätssicherung durch KI-basierte Vergleiche von Produktkennwerten vor und nach dem Einsatz an Papiermaschinen.
Hansen-Ampah, A. T.; Arndt, T.; Schiffer, S.; Ferrein, A.; Shahinfar, F. N.; Ramm, G. M.; and Klopp, K.
Leistung & Entgelt, 2023(2): 37–39. 2023.

@ARTICLE{HansenAmpah:etAl_LuE2023WIRKsam_5-1_Heimbach,
      author       = {Hansen-Ampah, Adjan Troy and Arndt, Tobias and Schiffer,
                      Stefan and Ferrein, Alexander and Shahinfar, Fatemeh N. and
                      Ramm, Gerda Maria and Klopp, Kai},
      title        = {{U}nterst{\"u}tzung von {P}roduktentwicklung und
                      {Q}ualit{\"a}tssicherung durch {KI}-basierte {V}ergleiche von
                      {P}roduktkennwerten vor und nach dem {E}insatz an
                      {P}apiermaschinen},
      journal      = {Leistung \& Entgelt},
      volume       = {2023},
      number       = {2},
      issn         = {2510-0424},
      address      = {Bergisch-Gladbach},
      publisher    = {Joh. Heider Verlag GmbH},
      reportid     = {RWTH-2023-07504},
      pages        = {37--39},
      year         = {2023},
      url_RWTH     = {https://publications.rwth-aachen.de/record/962617},
}
Nutzung einer Mensch-Roboter-Kollaboration zum Erlernen komplexer motorischer Fertigkeiten für Tätigkeiten in der Faserverbundherstellung.
Hansen-Ampah, A. T.; Backes, S. C.; Arndt, T.; Schiffer, S.; Ferrein, A.; Shahinfar, F. N.; Ramm, G. M.; and Viethen, H.
Leistung & Entgelt, 2023(2): 40–42. 2023.

@ARTICLE{HansenAmpah:etAl_LuE2023WIRKsam_5-1_Viethen,
      author       = {Hansen-Ampah, Adjan Troy and Backes, Sebastian Christoph
                      and Arndt, Tobias and Schiffer, Stefan and Ferrein,
                      Alexander and Shahinfar, Fatemeh N. and Ramm, Gerda Maria
                      and Viethen, Heinrich},
      title        = {{N}utzung einer {M}ensch-{R}oboter-{K}ollaboration zum
                      {E}rlernen komplexer motorischer {F}ertigkeiten f{\"u}r
                      {T}{\"a}tigkeiten in der {F}aserverbundherstellung},
      journal      = {Leistung \& Entgelt},
      volume       = {2023},
      number       = {2},
      issn         = {2510-0424},
      address      = {Bergisch-Gladbach},
      publisher    = {Joh. Heider Verlag GmbH},
      reportid     = {RWTH-2023-07505},
      pages        = {40--42},
      year         = {2023},
      url_RWTH     = {https://publications.rwth-aachen.de/record/962618},
}
2021 (4)
Machine learning based 3D object detection for navigation in unstructured environments.
Nikolovski, G.; Reke, M.; Elsen, I.; and Schiffer, S.
In 2021 IEEE Intelligent Vehicles Symposium Workshops (IV Workshops), pages 236–242, July 2021.

@INPROCEEDINGS{Nikolovski:etAl_IV2021WS_ML3D-ObjDet,
  author       = {Nikolovski, Gjorgji and Reke, Michael and Elsen, Ingo and Schiffer, Stefan},
  booktitle    = {2021 IEEE Intelligent Vehicles Symposium Workshops (IV Workshops)},
  title        = {{Machine learning based 3D object detection for navigation in unstructured environments}},
  year         = {2021},
  pages        = {236--242},
  abstract     = {In this paper we investigate the use of deep neural
                  networks for 3D object detection in uncommon, unstructured
                  environments such as in an open-pit mine. While neural
                  nets are frequently used for object detection in regular
                  autonomous driving applications, more unusual driving
                  scenarios aside street traffic pose additional challenges.
                  For one, the collection of appropriate data sets to train
                  the networks is an issue. For another, testing the
                  performance of trained networks often requires tailored
                  integration with the particular domain as well. While
                  there exist different solutions for these problems in
                  regular autonomous driving, there are only very few
                  approaches that work for special domains just as well. We
                  address both the challenges above in this work. First, we
                  discuss two possible ways of acquiring data for training
                  and evaluation. That is, we evaluate a semi-automated
                  annotation of recorded LIDAR data and we examine synthetic
                  data generation. Using these datasets we train and test
                  different deep neural network for the task of object
                  detection. Second, we propose a possible integration of a
                  ROS2 detector module for an autonomous driving platform.
                  Finally, we present the performance of three
                  state-of-the-art deep neural networks in the domain of 3D
                  object detection on a synthetic dataset and a smaller one
                  containing a characteristic object from an open-pit mine.},
  keywords     = {Deep learning;Training;Solid modeling;Three-dimensional
                  displays;Annotations;Conferences;Neural networks;3D
                  object detection;LiDAR;autonomous driving},
  doi          = {10.1109/IVWorkshops54471.2021.9669218},
  url_IEEExplore = {https://ieeexplore.ieee.org/abstract/document/9669218},
  ID_IEEE      = {9669218},
  month        = {July},
}

In this paper we investigate the use of deep neural networks for 3D object detection in uncommon, unstructured environments such as in an open-pit mine. While neural nets are frequently used for object detection in regular autonomous driving applications, more unusual driving scenarios aside from street traffic pose additional challenges. For one, the collection of appropriate data sets to train the networks is an issue. For another, testing the performance of trained networks often requires tailored integration with the particular domain as well. While there exist different solutions for these problems in regular autonomous driving, there are only very few approaches that work for special domains just as well. We address both of the challenges above in this work. First, we discuss two possible ways of acquiring data for training and evaluation. That is, we evaluate a semi-automated annotation of recorded LIDAR data and we examine synthetic data generation. Using these datasets we train and test different deep neural networks for the task of object detection. Second, we propose a possible integration of a ROS2 detector module for an autonomous driving platform. Finally, we present the performance of three state-of-the-art deep neural networks in the domain of 3D object detection on a synthetic dataset and a smaller one containing a characteristic object from an open-pit mine.
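The combination of synthetic and real training data described in the abstract can be expressed directly in PyTorch. A sketch with toy tensor datasets standing in for the paper's point-cloud corpora:

import torch
from torch.utils.data import ConcatDataset, DataLoader, TensorDataset

# Toy stand-ins: a large synthetic corpus and a small real, annotated one.
synthetic = TensorDataset(torch.randn(1000, 64), torch.zeros(1000, dtype=torch.long))
real      = TensorDataset(torch.randn(80, 64), torch.ones(80, dtype=torch.long))

train_set = ConcatDataset([synthetic, real])   # train on the union of both
loader = DataLoader(train_set, batch_size=16, shuffle=True)
for features, labels in loader:
    pass  # feed each mixed batch to the 3D detector's training step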
CO2 Meter: A do-it-yourself carbon dioxide measuring device for the classroom.
Dey, T.; Elsen, I.; Ferrein, A.; Frauenrath, T.; Reke, M.; and Schiffer, S.
In Proceedings of the 14th PErvasive Technologies Related to Assistive Environments Conference (PETRA '21), pages 292–299, New York, NY, USA, 2021. Association for Computing Machinery.

@inproceedings{Dey:etAl_PETRA2021_CO2Meter,
  author       = {Dey, Thomas and Elsen, Ingo and Ferrein, Alexander and Frauenrath, Tobias and Reke, Michael and Schiffer, Stefan},
  title        = {{CO2 Meter}: A do-it-yourself carbon dioxide measuring device for the classroom},
  booktitle    = {Proceedings of the 14th PErvasive Technologies Related to Assistive Environments Conference},
  pages        = {292--299},
  numpages     = {8},
  keywords     = {sensor networks, information systems, embedded hardware, education, do-it-yourself},
  location     = {Corfu, Greece},
  series       = {PETRA '21},
  year         = {2021},
  isbn         = {9781450387927},
  publisher    = {Association for Computing Machinery},
  address      = {New York, NY, USA},
  url          = {https://doi.org/10.1145/3453892.3462697},
  url_ACM_DL   = {https://dl.acm.org/doi/abs/10.1145/3453892.3462697},
  doi          = {10.1145/3453892.3462697},
  abstract     = {In this paper we report on CO2 Meter, a do-it-yourself
                  carbon dioxide measuring device for the classroom. Part of
                  the current measures for dealing with the SARS-CoV-2
                  pandemic is proper ventilation in indoor settings. This is
                  especially important in schools with students coming back
                  to the classroom even with high incidents rates. Static
                  ventilation patterns do not consider the individual
                  situation for a particular class. Influencing factors like
                  the type of activity, the physical structure or the room
                  occupancy are not incorporated. Also, existing devices are
                  rather expensive and often provide only limited
                  information and only locally without any networking. This
                  leaves the potential of analysing the situation across
                  different settings untapped. Carbon dioxide level can be
                  used as an indicator of air quality, in general, and of
                  aerosol load in particular. Since, according to the latest
                  findings, SARS-CoV-2 can be transmitted primarily in the
                  form of aerosols, carbon dioxide may be used as a proxy
                  for the risk of a virus infection. Hence, schools could
                  improve the indoor air quality and potentially reduce the
                  infection risk if they actually had measuring devices
                  available in the classroom. Our device supports schools in
                  ventilation and it allows for collecting data over the
                  Internet to enable a detailed data analysis and model
                  generation. First deployments in schools at different
                  levels were received very positively. A pilot installation
                  with a larger data collection and analysis is underway.},
}

In this paper we report on CO2 Meter, a do-it-yourself carbon dioxide measuring device for the classroom. Part of the current measures for dealing with the SARS-CoV-2 pandemic is proper ventilation in indoor settings. This is especially important in schools with students coming back to the classroom even with high incidence rates. Static ventilation patterns do not consider the individual situation for a particular class. Influencing factors like the type of activity, the physical structure or the room occupancy are not incorporated. Also, existing devices are rather expensive and often provide only limited information and only locally without any networking. This leaves the potential of analysing the situation across different settings untapped. Carbon dioxide level can be used as an indicator of air quality, in general, and of aerosol load in particular. Since, according to the latest findings, SARS-CoV-2 can be transmitted primarily in the form of aerosols, carbon dioxide may be used as a proxy for the risk of a virus infection. Hence, schools could improve the indoor air quality and potentially reduce the infection risk if they actually had measuring devices available in the classroom. Our device supports schools in ventilation and it allows for collecting data over the Internet to enable a detailed data analysis and model generation. First deployments in schools at different levels were received very positively. A pilot installation with a larger data collection and analysis is underway.
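The data path described above (local measurement plus collection over the Internet) can be sketched in a few lines. The sensor driver and the server endpoint below are hypothetical placeholders, not details from the paper:

import time
import random
import requests

ENDPOINT = "https://example.org/co2/api/measurements"   # hypothetical server

def read_co2_ppm() -> float:
    # Placeholder for the real sensor driver (DIY builds often use an NDIR CO2 sensor).
    return 400.0 + 800.0 * random.random()

while True:
    sample = {"room": "classroom-1", "co2_ppm": read_co2_ppm(), "ts": time.time()}
    requests.post(ENDPOINT, json=sample, timeout=10)    # collect centrally
    time.sleep(60)                                      # one sample per minute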
Portable High-level Agent Programming with golog++.
Mataré, V.; Viehmann, T.; Hofmann, T.; Lakemeyer, G.; Ferrein, A.; and Schiffer, S.
In Proceedings of the 13th International Conference on Agents and Artificial Intelligence - Volume 2: ICAART, pages 218–227, 2021. INSTICC, SciTePress.

@inproceedings{Matare-etAl_ICAART2021_PortableHighLevelAgentCode,
  author       = {Victor Matar{\'e} and Tarik Viehmann and Till Hofmann and Gerhard Lakemeyer and Alexander Ferrein and Stefan Schiffer},
  title        = {{Portable High-level Agent Programming with golog++}},
  booktitle    = {Proceedings of the 13th International Conference on Agents and Artificial Intelligence - Volume 2: ICAART},
  year         = {2021},
  pages        = {218--227},
  publisher    = {SciTePress},
  organization = {INSTICC},
  doi          = {10.5220/0010253902180227},
  url_scitepress = {https://www.scitepress.org/Papers/2021/102539/},
  isbn         = {978-989-758-484-8},
  abstract     = {We present golog++, a high-level agent programming and
                  interfacing framework that offers a temporal constraint
                  language to explicitly model layer-penetrating
                  contingencies in low-level platform behavior. It can be
                  used to maintain a clear separation between an agent's
                  domain model and certain quirks of its execution platform
                  that affect problem solving behavior. Our system reasons
                  about the execution of an abstract (i.e. exclusively
                  domain-bound) plan on a particular execution platform.
                  This way, we avoid compounding the complexity of the
                  planning problem while improving the modularity of both
                  golog++ and the user code. On a run-through example from
                  the well-known blocksworld domain, we demonstrate the
                  entire process from domain modeling and platform modeling
                  to plan transformation and platform-specific plan
                  execution.},
}

We present golog++, a high-level agent programming and interfacing framework that offers a temporal constraint language to explicitly model layer-penetrating contingencies in low-level platform behavior. It can be used to maintain a clear separation between an agent's domain model and certain quirks of its execution platform that affect problem solving behavior. Our system reasons about the execution of an abstract (i.e. exclusively domain-bound) plan on a particular execution platform. This way, we avoid compounding the complexity of the planning problem while improving the modularity of both golog++ and the user code. On a run-through example from the well-known blocksworld domain, we demonstrate the entire process from domain modeling and platform modeling to plan transformation and platform-specific plan execution.
DiaBuLI - Building Dialogues for Human-Robot Interactions by Learning from Object Information.
Schiffer, S.; Arndt, J.; Platte, L.; Madyal, J.; and Spangenberg, M.
In Jokinen, K.; Heckmann, M.; Lala, D.; and Lison, P., editor(s), 1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction (RobotDial2020), held in conjunction with IJCAI-PRICAI 2020, pages 43–51, January 2021. ISCA.

@inproceedings{IJCAI-WS-ROBOTDIAL2020_DiaBuLI,
  author       = {Stefan Schiffer and Julia Arndt and Laura Platte and Jayadev Madyal and Marlon Spangenberg},
  editor       = {Kristiina Jokinen and Martin Heckmann and Divesh Lala and Pierre Lison},
  title        = {{DiaBuLI} -- Building Dialogues for Human-Robot Interactions by Learning from Object Information},
  booktitle    = {1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction {RobotDial2020},
                  Held in Conjunction with {IJCAI-PRICAI} 2020},
  date         = {2021-01-08},
  year         = {2021},
  month        = Jan,
  day          = {8},
  pages        = {43--51},
  publisher    = {ISCA},
  url          = {https://www.isca-speech.org/archive/robotdial_2021/schiffer21_robotdial.html},
  url_Workshop = {https://www.isca-speech.org/archive/robotdial_2021/},
  url_WS-alt   = {http://sap.ist.i.kyoto-u.ac.jp/ijcai2020/robotdial/},
  url_PDF      = {https://www.isca-speech.org/archive/pdfs/robotdial_2021/schiffer21_robotdial.pdf},
  url_PDF-alt  = {http://sap.ist.i.kyoto-u.ac.jp/ijcai2020/robotdial/14.pdf},
  url_RWTH     = {https://publications.rwth-aachen.de/record/822421/},
  url_TalkVid  = {https://ijcai20.org/w19/},
  doi          = {10.21437/RobotDial.2021-7},
  abstract     = {We report on preliminary results of building a human-robot
                  dialogue by using decision tree learning. The system is
                  particularly suited for dialogues that go along with
                  decision processes. The self-developed robot MoBi should
                  assist children in the classroom with waste management. To
                  do so, MoBi asks a couple of yes/no-questions about a
                  waste item to dispose in order to decide on the correct
                  bin. Since the dialogue mimics the decision process at
                  hand, we are interested in whether we could make use of a
                  classification algorithm, namely decision tree learning,
                  to help us build the dialogue structure. We take a
                  collection of instances of the classification task where
                  we know the right bin decision for and we characterize
                  these examples by a set of attributes that describe
                  features like material and usage properties. Then, we
                  perform decision tree learning to generate a tree which
                  builds the basis for the dialogue with MoBi. Existing
                  approaches have investigated the use of learning to
                  optimize the length or number of turns in an interaction
                  dialogue. We are also interested in optimizing our
                  dialogue but we look into finding other interesting
                  qualities as well. We compare trees resulting from
                  different configurations of our decision tree learning
                  both with one another as well as with hand-crafted
                  dialogues used for the robot MoBi so far.},
}

We report on preliminary results of building a human-robot dialogue by using decision tree learning. The system is particularly suited for dialogues that go along with decision processes. The self-developed robot MoBi should assist children in the classroom with waste management. To do so, MoBi asks a couple of yes/no-questions about a waste item to dispose in order to decide on the correct bin. Since the dialogue mimics the decision process at hand, we are interested in whether we could make use of a classification algorithm, namely decision tree learning, to help us build the dialogue structure. We take a collection of instances of the classification task for which we know the right bin decision, and we characterize these examples by a set of attributes that describe features like material and usage properties. Then, we perform decision tree learning to generate a tree which builds the basis for the dialogue with MoBi. Existing approaches have investigated the use of learning to optimize the length or number of turns in an interaction dialogue. We are also interested in optimizing our dialogue but we look into finding other interesting qualities as well. We compare trees resulting from different configurations of our decision tree learning both with one another as well as with hand-crafted dialogues used for the robot MoBi so far.
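The paper's core move, mimicking the bin decision with a learned tree whose splits become yes/no questions, can be sketched as follows. Items, attributes and bins are invented for illustration and differ from the feature set actually used with MoBi:

from sklearn.tree import DecisionTreeClassifier

features = ["made of paper", "organic", "packaging"]
X = [[1, 0, 0],   # newspaper
     [0, 1, 0],   # apple core
     [0, 0, 1],   # yoghurt cup
     [0, 0, 0]]   # broken mug
y = ["paper", "organic", "recycling", "residual"]   # correct bin per item

tree = DecisionTreeClassifier().fit(X, y)

def ask(node=0):
    # Walk the learned tree, turning each internal split into a yes/no question.
    t = tree.tree_
    if t.children_left[node] == -1:   # leaf reached: the bin is decided
        print("Please use the", tree.classes_[t.value[node].argmax()], "bin.")
        return
    answer = input(f"Is the item {features[t.feature[node]]}? (y/n) ")
    ask(t.children_right[node] if answer == "y" else t.children_left[node])

ask()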
2020 (5)
Constraint-based Plan Transformation in a Safe and Usable GOLOG Language.
Mataré, V.; Schiffer, S.; Ferrein, A.; Viehmann, T.; Hofmann, T.; and Lakemeyer, G.
In Aertbeliën, E.; Borghesan, G.; Bruyninckx, H.; Decré, W.; Gergondet, P.; Kheddar, A.; Caldwell, D.; Hoffman, E. M.; and Tingelstad, L., editor(s), Proceedings of the Workshop on Bringing Constraint-based Robot Programming to Real-World Applications (CobaRoP) held with the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), October 2020. To appear.

@inproceedings{Matare:EtAl:IROS2020WS:ConTrAkt,
  author       = {Victor Matar{\'e} and Stefan Schiffer and Alexander Ferrein and Tarik Viehmann and Till Hofmann and Gerhard Lakemeyer},
  title        = {Constraint-based Plan Transformation in a Safe and Usable {GOLOG} Language},
  booktitle    = {Proceedings of the Workshop on Bringing Constraint-based Robot Programming to Real-World Applications (CobaRoP)
                  held with the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  editor       = {Erwin Aertbeli{\"e}n and Gianni Borghesan and Herman Bruyninckx and Wilm Decr{\'e}
                  and Pierre Gergondet and Abderrahmane Kheddar
                  and Darwin Caldwell and Enrico Mingo Hoffman
                  and Lars Tingelstad},
  month        = {October},
  day          = {25--29},
  year         = {2020},
  location     = {Las Vegas, NV, USA},
  url_Workshop = {https://iros2020-workshop-cobarop.gitlab.io/},
  note         = {to appear},
}
\n \n\n \n \n \n \n \n \n Compiling ROS Schooling Curricula via Contentual Taxonomies.\n \n \n \n \n\n\n \n Ferrein, A.; Meeßen, M.; Limpert, N.; and Schiffer, S.\n\n\n \n\n\n\n In Balogh, R.; Lepuschitz, W.; and Obdrzálek, D., editor(s), Robotics in Education - Methods and Applications for Teaching and Learning, Proceedings of the 11th International Conference on Robotics and Education (RiE 2020), of Advances in Intelligent Systems and Computing, 2020. Springer\n to appear\n\n\n\n
\n\n\n\n \n \n \"CompilingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Ferrein:EtAl:RIE2020:CROSSCUT,\n  author       = {Alexander Ferrein and Marcus Mee{\ss}en and Nicolas Limpert and Stefan Schiffer},\n  editor       = {Richard Balogh and Wilfried Lepuschitz and David Obdrz{\'{a}}lek},\n  title        = {Compiling {ROS} Schooling Curricula via Contentual Taxonomies},\n  booktitle    = {Robotics in Education - Methods and Applications for Teaching and Learning,\n                  Proceedings of the 11th International Conference on Robotics and Education (RiE 2020)},\n  series       = {Advances in Intelligent Systems and Computing},\n  volume       = {},\n  OPTpages     = {x--y},\n  publisher    = {Springer},\n  year         = {2020},\n  url          = {},\n  doi          = {},\n  abstract     = {The Robot Operating System (ROS) is the current\n                  de-facto standard in robot middlewares. The steadily\n                  increasing size of the user base results in a\n                  greater demand for training as well. User groups\n                  range from students in academia to industry\n                  professionals with a broad spectrum of developers\n                  in between. To deliver high quality training and\n                  education to any of these audiences, educators\n                  need to tailor individual curricula for any such\n                  training. In this paper, we present an approach to\n                  ease compiling curricula for ROS trainings based on\n                  a taxonomy of the teaching contents. The instructor\n                  can select a set of dedicated learning units and the\n                  system will automatically compile the teaching\n                  material based on the dependencies of the units\n                  selected and a set of parameters for a particular\n                  training. We walk through an example training to\n                  illustrate our work.},\n  note         = {to appear},\n}\n
\n
\n\n\n
\n The Robot Operating System (ROS) is the current de-facto standard in robot middlewares. The steadily increasing size of the user base results in a greater demand for training as well. User groups range from students in academia to industry professionals with a broad spectrum of developers in between. To deliver high quality training and education to any of these audiences, educators need to tailor individual curricula for any such training. In this paper, we present an approach to ease compiling curricula for ROS trainings based on a taxonomy of the teaching contents. The instructor can select a set of dedicated learning units and the system will automatically compile the teaching material based on the dependencies of the units selected and a set of parameters for a particular training. We walk through an example training to illustrate our work.\n
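\n The compilation step the abstract describes can be pictured as closing the selected units under their prerequisites and ordering them topologically. A minimal sketch under that reading (Python 3.9+ graphlib; the unit names and dependencies are invented, not the paper's taxonomy):\n
\n
# Sketch: compile a curriculum by closing the selected units under their
# prerequisites, then ordering them topologically. Unit names are hypothetical.
from graphlib import TopologicalSorter  # Python 3.9+

deps = {  # unit -> prerequisite units (a tiny, invented taxonomy fragment)
    "ros_basics": set(),
    "topics_and_messages": {"ros_basics"},
    "tf2": {"topics_and_messages"},
    "navigation": {"tf2"},
}

def compile_curriculum(selected, deps):
    """Pull in all prerequisites of the selection, then order for teaching."""
    needed, stack = set(), list(selected)
    while stack:
        unit = stack.pop()
        if unit not in needed:
            needed.add(unit)
            stack.extend(deps.get(unit, ()))
    return list(TopologicalSorter(
        {u: deps.get(u, set()) & needed for u in needed}).static_order())

print(compile_curriculum({"navigation"}, deps))
# -> ['ros_basics', 'topics_and_messages', 'tf2', 'navigation']
\n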
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A Smart Factory Setup based on the RoboCup Logistics League.\n \n \n \n\n\n \n Eltester, N. S.; Ferrein, A.; and Schiffer, S.\n\n\n \n\n\n\n In 2020 IEEE International Conference on Industrial Cyber Physical Systems (ICPS), June 10-12 2020. \n to appear\n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{ Eltester:EtAl:ICPS2020:SmartFactoryAC,\n  author       = {Niklas Sebastian {Eltester} and Alexander {Ferrein} and Stefan {Schiffer}},\n  booktitle    = {2020 IEEE International Conference on Industrial Cyber Physical Systems (ICPS)},\n  OPTeditor       = {},\n  title        = {A Smart Factory Setup based on the RoboCup Logistics League}, \n  year         = {2020},\n  month        = {June 10-12},\n  location     = {Tampere, Finland - ONLINE},\n  OPTpages        = {x--y},\n  abstract     = {In this paper we present Smart-fACtory, a setup for\n                  a research and teaching facility in industrial\n                  robotics that is based on the RoboCup Logistics\n                  League. It is driven by the need for developing and\n                  applying solutions for digital production.\n                  Digitization receives constantly increasing\n                  attention in many areas, especially in industry. The\n                  common theme is to make things smart by using\n                  intelligent computer technology. Especially in the\n                  last decade there have been many attempts to improve\n                  existing processes in factories, for example, in\n                  production logistics, also with deploying\n                  cyber-physical systems. An initiative that explores\n                  challenges and opportunities for robots in such a\n                  setting is the RoboCup Logistics League. Since its\n                  foundation in 2012 it is an international effort for\n                  research and education in an intra-warehouse\n                  logistics scenario. During seven years of\n                  competition a lot of knowledge and experience\n                  regarding autonomous robots was gained. This\n                  knowledge and experience shall provide the basis for\n                  further research in challenges of future\n                  production. The focus of our Smart-fACtory is to\n                  create a stimulating environment for research on\n                  logistics robotics, for teaching activities in\n                  computer science and electrical engineering\n                  programmes as well as for industrial users to study\n                  and explore the feasibility of future\n                  technologies. Building on a very successful history\n                  in the RoboCup Logistics League we aim to provide\n                  stakeholders with a dedicated facility oriented at\n                  their individual needs.},\n  keywords     = {logistics robotics, Industry 4.0, smart factory,\n                  RoboCup Logistics League, RCLL},\n  doi          = {10.1109/ICPHYS.2020.xxx},\n  note         = {to appear},\n}\n\n
\n
\n\n\n
\n In this paper we present Smart-fACtory, a setup for a research and teaching facility in industrial robotics that is based on the RoboCup Logistics League. It is driven by the need for developing and applying solutions for digital production. Digitization receives constantly increasing attention in many areas, especially in industry. The common theme is to make things smart by using intelligent computer technology. Especially in the last decade there have been many attempts to improve existing processes in factories, for example, in production logistics, also with deploying cyber-physical systems. An initiative that explores challenges and opportunities for robots in such a setting is the RoboCup Logistics League. Since its foundation in 2012 it is an international effort for research and education in an intra-warehouse logistics scenario. During seven years of competition a lot of knowledge and experience regarding autonomous robots was gained. This knowledge and experience shall provide the basis for further research in challenges of future production. The focus of our Smart-fACtory is to create a stimulating environment for research on logistics robotics, for teaching activities in computer science and electrical engineering programmes as well as for industrial users to study and explore the feasibility of future technologies. Building on a very successful history in the RoboCup Logistics League we aim to provide stakeholders with a dedicated facility oriented at their individual needs.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Integrating golog++ and ROS for Practical and Portable High-level Control.\n \n \n \n \n\n\n \n Kirsch, M.; Mataré, V.; Ferrein, A.; and Schiffer, S.\n\n\n \n\n\n\n In Rocha, A. P.; Steels, L.; and van den Herik, H. J., editor(s), Proceedings of the 12th International Conference on Agents and Artificial Intelligence ICAART 2020, volume 2, pages 692–699, February 22-24 2020. SCITEPRESS\n \n\n\n\n
\n\n\n\n \n \n \"Integrating dblp\n  \n \n \n \"Integrating dblp-bib\n  \n \n \n \"Integrating doi\n  \n \n \n \"Integrating scitepress\n  \n \n \n \"Integrating talkentry\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{KirschEtAl_ICAART2020_IntegratingGolog++ROS,\n  author       = {Maximilian Kirsch and Victor Matar{\'{e}} and\n                  Alexander Ferrein and Stefan Schiffer},\n  editor       = {Ana Paula Rocha and Luc Steels and H. Jaap van den Herik},\n  title        = {Integrating {golog{++}} and {ROS} for Practical and Portable High-level Control},\n  booktitle    = {Proceedings of the 12th International Conference on Agents and Artificial Intelligence {ICAART} 2020},\n  pages        = {692--699},\n  volume       = {2},\n  location     = {Valletta, Malta},\n  publisher    = {{SCITEPRESS}},\n  month        = {February 22-24},\n  year         = {2020},\n  doi          = {10.5220/0008984406920699},\n  url_dblp     = {https://dblp.uni-trier.de/rec/conf/icaart/KirschMF020.html},\n  url_dblp-bib = {https://dblp.org/rec/conf/icaart/KirschMF020.bib},\n  url_doi      = {https://doi.org/10.5220/0008984406920699},\n  url_ScitePress = {https://www.scitepress.org/Link.aspx?doi=10.5220/0008984406920699},\n  url_TalkEntry  = {https://www.insticc.org/node/TechnicalProgram/icaart/2020/presentationDetails/89844},\n  abstract     = {The field of Cognitive Robotics aims at intelligent\n                  decision making of autonomous robots. It has matured\n                  over the last 25 or so years quite a bit. That is, a\n                  number of high-level control languages and\n                  architectures have emerged from the field. One\n                  concern in this regard is the action language\n                  GOLOG. GOLOG has been used in a rather large number\n                  of applications as a high-level control language\n                  ranging from intelligent service robots to soccer\n                  robots. For the lower level robot software, the\n                  Robot Operating System (ROS) has been around for\n                  more than a decade now and it has developed into the\n                  standard middleware for robot applications. ROS\n                  provides a large number of packages for standard\n                  tasks in robotics like localisation, navigation, and\n                  object recognition. Interestingly enough, only\n                  little work within ROS has gone into the high-level\n                  control of robots. In this paper, we describe our\n                  approach to marry the GOLOG action language with\n                  ROS. In particular, we present our architecture on\n                  integrating golog++, which is based on the GOLOG\n                  dialect Readylog, with the Robot Operating\n                  System. With an example application on the Pepper\n                  service robot, we show how primitive actions can be\n                  easily mapped to the ROS ActionLib framework and\n                  present our control architecture in detail.},\n}\n\n
\n
\n\n\n
\n The field of Cognitive Robotics aims at intelligent decision making of autonomous robots. It has matured over the last 25 or so years quite a bit. That is, a number of high-level control languages and architectures have emerged from the field. One concern in this regard is the action language GOLOG. GOLOG has been used in a rather large number of applications as a high-level control language ranging from intelligent service robots to soccer robots. For the lower level robot software, the Robot Operating System (ROS) has been around for more than a decade now and it has developed into the standard middleware for robot applications. ROS provides a large number of packages for standard tasks in robotics like localisation, navigation, and object recognition. Interestingly enough, only little work within ROS has gone into the high-level control of robots. In this paper, we describe our approach to marry the GOLOG action language with ROS. In particular, we present our architecture on integrating golog++, which is based on the GOLOG dialect Readylog, with the Robot Operating System. With an example application on the Pepper service robot, we show how primitive actions can be easily mapped to the ROS ActionLib framework and present our control architecture in detail.\n
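\n As an illustration of the kind of mapping described, here is a minimal sketch of dispatching one symbolic primitive action to an ActionLib server (ROS 1 Python; the goto action and its mapping to move_base are placeholder choices, not the actual golog++ interface):\n
\n
# Sketch: dispatch one symbolic primitive action, e.g. goto(x, y), to a ROS
# ActionLib server. The mapping to move_base is a placeholder example and is
# not the actual golog++ execution backend.
import actionlib
import rospy
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal

def execute_primitive_action(name, args):
    """Run one high-level action to completion and report its outcome."""
    if name == "goto":
        client = actionlib.SimpleActionClient("move_base", MoveBaseAction)
        client.wait_for_server()
        goal = MoveBaseGoal()
        goal.target_pose.header.frame_id = "map"
        goal.target_pose.pose.position.x, goal.target_pose.pose.position.y = args
        goal.target_pose.pose.orientation.w = 1.0
        client.send_goal(goal)
        client.wait_for_result()
        return client.get_state()  # fed back to the high-level program
    raise ValueError("no ActionLib mapping for primitive action %r" % name)

if __name__ == "__main__":
    rospy.init_node("gologpp_dispatch_sketch")
    execute_primitive_action("goto", (1.0, 2.0))
\n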
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Self-Driving Car Architecture in ROS2.\n \n \n \n \n\n\n \n Reke, M.; Peter, D.; Schulte-Tigges, J.; Schiffer, S.; Ferrein, A.; Walter, T.; and Matheis, D.\n\n\n \n\n\n\n In 2020 International SAUPEC/RobMech/PRASA Conference, pages 1–6, Jan 2020. \n \n\n\n\n
\n\n\n\n \n \n \"A ieeexplore\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@INPROCEEDINGS{Reke:EtAl:RobMech2020:SelfDrivingCarArchROS2,\n  author       = {M. {Reke} and D. {Peter} and J. {Schulte-Tigges} and\n                  S. {Schiffer} and A. {Ferrein} and T. {Walter} and\n                  D. {Matheis}},\n  booktitle    = {2020 International SAUPEC/RobMech/PRASA Conference}, \n  title        = {A Self-Driving Car Architecture in {ROS2}}, \n  month        = {Jan},\n  year         = {2020},\n  pages        = {1--6},\n  abstract     = {In this paper we report on an architecture for a\n                  self-driving car that is based on ROS2. Self-driving\n                  cars have to take decisions based on their sensory\n                  input in real-time, providing high reliability with\n                  a strong demand in functional safety. In principle,\n                  self-driving cars are robots. However, typical robot\n                  software, in general, and the previous version of\n                  the Robot Operating System (ROS), in particular,\n                  does not always meet these requirements. With the\n                  successor ROS2 the situation has changed and it\n                  might be considered as a solution for automated and\n                  autonomous driving. Existing robotic software based\n                  on ROS was not ready for safety critical\n                  applications like self-driving cars. We propose an\n                  architecture for using ROS2 for a self-driving car\n                  that enables safe and reliable real-time behaviour,\n                  but keeping the advantages of ROS such as a\n                  distributed architecture and standardised message\n                  types. First experiments with an automated real\n                  passenger car at lower and higher speed-levels show\n                  that our approach seems feasible for autonomous\n                  driving under the necessary real-time conditions.},\n  keywords     = {automobiles;control engineering computing;mobile\n                  robots;operating systems (computers);safety-critical\n                  software;ROS2;autonomous driving;robotic\n                  software;self-driving cars;automated real passenger\n                  car;self-driving car architecture;robot operating\n                  system;Self-driving car;autonomous\n                  driving;architecture;robot operating\n                  system;ROS;ROS2;LKAS;V2X},\n  doi          = {10.1109/SAUPEC/RobMech/PRASA48453.2020.9041020},\n  url_IEEEXplore = {https://ieeexplore.ieee.org/abstract/document/9041020},\n  ISBN         = {Electronic ISBN: 978-1-7281-4162-6, Print on Demand(PoD) ISBN: 978-1-7281-4163-3},\n}\n\n
\n
\n\n\n
\n In this paper we report on an architecture for a self-driving car that is based on ROS2. Self-driving cars have to take decisions based on their sensory input in real-time, providing high reliability with a strong demand in functional safety. In principle, self-driving cars are robots. However, typical robot software, in general, and the previous version of the Robot Operating System (ROS), in particular, does not always meet these requirements. With the successor ROS2 the situation has changed and it might be considered as a solution for automated and autonomous driving. Existing robotic software based on ROS was not ready for safety critical applications like self-driving cars. We propose an architecture for using ROS2 for a self-driving car that enables safe and reliable real-time behaviour, but keeping the advantages of ROS such as a distributed architecture and standardised message types. First experiments with an automated real passenger car at lower and higher speed-levels show that our approach seems feasible for autonomous driving under the necessary real-time conditions.\n
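\n One concrete ROS2 facility behind this argument is per-topic Quality-of-Service: a node can trade reliability against freshness explicitly. A minimal rclpy sketch (the topic and the policy choices are illustrative, not taken from the paper's architecture):\n
\n
# Sketch: an rclpy node with an explicit QoS profile, preferring the newest
# sensor reading over retransmission. Topic and policies are illustrative.
import rclpy
from rclpy.node import Node
from rclpy.qos import HistoryPolicy, QoSProfile, ReliabilityPolicy
from sensor_msgs.msg import LaserScan

class ScanListener(Node):
    def __init__(self):
        super().__init__("scan_listener_sketch")
        qos = QoSProfile(
            reliability=ReliabilityPolicy.BEST_EFFORT,  # freshness over retries
            history=HistoryPolicy.KEEP_LAST,
            depth=1,  # only the newest scan matters for tight control loops
        )
        self.create_subscription(LaserScan, "/scan", self.on_scan, qos)

    def on_scan(self, msg):
        self.get_logger().info("closest obstacle: %.2f m" % min(msg.ranges))

if __name__ == "__main__":
    rclpy.init()
    rclpy.spin(ScanListener())
\n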
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2019\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Virtual reality in preoperative imaging in maxillofacial surgery: implementation of “the next level”?.\n \n \n \n \n\n\n \n Bartella, A.; Kamal, M.; Scholl, I.; Schiffer, S.; Steegmann, J.; Ketelsen, D.; Hölzle, F.; and Lethaus, B.\n\n\n \n\n\n\n British Journal of Oral and Maxillofacial Surgery, 57(7): 644–648. 2019.\n \n\n\n\n
\n\n\n\n \n \n \"VirtualPaper\n  \n \n \n \"Virtual bjoms\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{ Bartella:EtAl:BJOMS2019:VR-in-preop-Img,\n  title        = "Virtual reality in preoperative imaging in maxillofacial surgery: implementation of “the next level”?",\n  author       = "A.K. Bartella and M. Kamal and I. Scholl and S. Schiffer and\n                  J. Steegmann and D. Ketelsen and F. Hölzle and B. Lethaus",\n  journal      = "British Journal of Oral and Maxillofacial Surgery",\n  volume       = "57",\n  number       = "7",\n  pages        = "644--648",\n  year         = "2019",\n  issn         = "0266-4356",\n  doi          = "https://doi.org/10.1016/j.bjoms.2019.02.014",\n  url          = "http://www.sciencedirect.com/science/article/pii/S0266435619302104",\n  url_BJOMS    = {https://www.bjoms.com/article/S0266-4356(19)30210-4/fulltext},\n  keywords     = "VR, Virtual Reality, Preoperative Planning, CT, MRI",\n  abstract     = "Not only are current imaging techniques - cone-beam\n                  computed tomography (CT), CT, and magnetic resonance\n                  imaging (MRI) - becoming more precise in capturing\n                  data, but the illustration and interpretation of the\n                  acquired images is no longer limited to conventional\n                  display screens or projectors. The so-called\n                  “virtual reality” (VR) glasses have the potential to\n                  engage the viewer in a 3-dimensional space, and\n                  ultimately to enable evaluation of the reconstructed\n                  anatomical structures from a new perspective. For\n                  the first time in the field of oral and\n                  maxillofacial surgery (OMFS), a 3-dimensional\n                  imaging dataset (cone-beam CT, CT, and MRI) can be\n                  evaluated by using VR glasses. A medical student, an\n                  OMFS resident, and an OMFS consultant rated the\n                  preoperative usability of VR glasses to improve the\n                  operative understanding of three cases: a deeply\n                  impacted wisdom tooth, a fracture of the lower jaw,\n                  and an oncological resection. VR glasses seem to\n                  help to simplify operations and give the surgeon a\n                  good preoperative overview of the intraoperative\n                  findings, particularly in the evaluation of impacted\n                  teeth and hard tissue structures. In addition, VR\n                  glasses seem to be a promising innovation to help in\n                  the training of surgical residents and to teach\n                  students. However, the more experienced the surgeon,\n                  the smaller is the additional value of VR\n                  glasses. Preoperative examination using VR glasses\n                  can aid better understanding and planning of the\n                  surgical site in the future, and is an innovative\n                  piece of advanced technology for displaying CT,\n                  cone-beam CT, and MRI anatomical data.",\n}
\n
\n\n\n
\n Not only are current imaging techniques - cone-beam computed tomography (CT), CT, and magnetic resonance imaging (MRI) - becoming more precise in capturing data, but the illustration and interpretation of the acquired images is no longer limited to conventional display screens or projectors. The so-called “virtual reality” (VR) glasses have the potential to engage the viewer in a 3-dimensional space, and ultimately to enable evaluation of the reconstructed anatomical structures from a new perspective. For the first time in the field of oral and maxillofacial surgery (OMFS), a 3-dimensional imaging dataset (cone-beam CT, CT, and MRI) can be evaluated by using VR glasses. A medical student, an OMFS resident, and an OMFS consultant rated the preoperative usability of VR glasses to improve the operative understanding of three cases: a deeply impacted wisdom tooth, a fracture of the lower jaw, and an oncological resection. VR glasses seem to help to simplify operations and give the surgeon a good preoperative overview of the intraoperative findings, particularly in the evaluation of impacted teeth and hard tissue structures. In addition, VR glasses seem to be a promising innovation to help in the training of surgical residents and to teach students. However, the more experienced the surgeon, the smaller is the additional value of VR glasses. Preoperative examination using VR glasses can aid better understanding and planning of the surgical site in the future, and is an innovative piece of advanced technology for displaying CT, cone-beam CT, and MRI anatomical data.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A System for Continuous Underground Site Mapping and Exploration.\n \n \n \n \n\n\n \n Ferrein, A.; Scholl, I.; Neumann, T.; Krückel, K.; and Schiffer, S.\n\n\n \n\n\n\n In Reyhanoglu, M.; and Cubber, G. D., editor(s), Unmanned Robotic Systems and Applications. IntechOpen, May 2019.\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n \n \"A intech\n  \n \n \n \"A book\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@incollection{Ferrein:EtAl:InTech2019:SCUSME,\n  title        = {{A System for Continuous Underground Site Mapping and Exploration}},\n  author       = {Ferrein, Alexander and Scholl, Ingrid and Neumann, Tobias and Kr{\"u}ckel, Kai and Schiffer, Stefan},\n  booktitle    = {Unmanned Robotic Systems and Applications},\n  editor       = {Mahmut Reyhanoglu and Geert De Cubber},\n  year         = {2019},\n  month        = may,\n  publisher    = {{IntechOpen}},\n  doi          = {10.5772/intechopen.85859},\n  url          = {https://doi.org/10.5772%2Fintechopen.85859},\n  url_InTech   = {https://www.intechopen.com/online-first/a-system-for-continuous-underground-site-mapping-and-exploration},\n  url_Book     = {https://www.intechopen.com/books/unmanned-robotic-systems-and-applications},\n  keywords     = {3D mapping, continuous mapping, large underground site mapping, mapping tools,\n                  point cloud registration, map exploration, map visualization},\n  abstract     = {3D mapping becomes ever more important not only in\n                  industrial mobile robotic applications for AGV and\n                  production vehicles but also for search and rescue\n                  scenarios. In this chapter we report on our work of\n                  mapping and exploring underground mines. Our\n                  contribution is two-fold: First, we present our\n                  custom-built 3D laser range platform SWAP and\n                  compare it against an architectural laser\n                  scanner. The advantages are that the mapping vehicle\n                  can scan in a continuous mode and does not have to\n                  do stop-and-go scanning. The second contribution is\n                  the mapping tool mapit which supports and automates\n                  the registration of large sets of point clouds. The\n                  idea behind mapit is to keep the raw point cloud\n                  data as a basis for any map generation and only\n                  store all operations executed on the point\n                  clouds. This way the initial data do not get lost,\n                  and improvements on low-level data (e.g. improved\n                  transforms through loop closure) will automatically\n                  improve the final maps. Finally, we also present\n                  methods for visualization and interactive\n                  exploration of such maps.},\n}\n\n
\n
\n\n\n
\n 3D mapping becomes ever more important not only in industrial mobile robotic applications for AGV and production vehicles but also for search and rescue scenarios. In this chapter we report on our work of mapping and exploring underground mines. Our contribution is two-fold: First, we present our custom-built 3D laser range platform SWAP and compare it against an architectural laser scanner. The advantages are that the mapping vehicle can scan in a continuous mode and does not have to do stop-and-go scanning. The second contribution is the mapping tool mapit which supports and automates the registration of large sets of point clouds. The idea behind mapit is to keep the raw point cloud data as a basis for any map generation and only store all operations executed on the point clouds. This way the initial data do not get lost, and improvements on low-level data (e.g. improved transforms through loop closure) will automatically improve the final maps. Finally, we also present methods for visualization and interactive exploration of such maps.\n
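\n The mapit idea of immutable raw data plus a replayable operation log can be sketched as follows (illustrative Python only; the class, operation names, and parameters are invented and do not reflect mapit's actual API):\n
\n
# Sketch of the idea only: raw point clouds stay untouched; the map is
# (re)generated by replaying a recorded operation list. Names are invented.
import numpy as np

class MapWorkspace:
    def __init__(self, raw_clouds):
        self.raw = [np.asarray(c, float) for c in raw_clouds]  # immutable input
        self.ops = []  # recorded pipeline, e.g. [("transform", {...}), ...]

    def record(self, name, **params):
        self.ops.append((name, params))

    def build(self):
        """Replay all recorded operations on the raw data."""
        clouds = [c.copy() for c in self.raw]
        for name, params in self.ops:
            if name == "transform":  # e.g. an improved loop-closure transform
                T = params["matrix"]
                clouds = [c @ T[:3, :3].T + T[:3, 3] for c in clouds]
            elif name == "voxel_downsample":
                s = params["size"]
                clouds = [np.unique(np.round(c / s) * s, axis=0) for c in clouds]
        return np.vstack(clouds)

ws = MapWorkspace([np.random.rand(1000, 3)])
ws.record("voxel_downsample", size=0.05)
print(ws.build().shape)  # raw data is still intact in ws.raw
\n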
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Calibration of a Rotating or Revolving Platform with a LiDAR Sensor.\n \n \n \n\n\n \n Claer, M.; Ferrein, A.; and Schiffer, S.\n\n\n \n\n\n\n Applied Sciences, 9(11): 2238. January 2019.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{Claer:Ferrein:Schiffer_ApplSci2019_Calibration,\n  author       = {Claer, Mario and Ferrein, Alexander and Schiffer, Stefan},\n  title        = {Calibration of a {{Rotating}} or {{Revolving Platform}} with a {{LiDAR Sensor}}},\n  journal      = {Applied Sciences},\n  year         = {2019},\n  month        = jan,\n  volume       = {9},\n  number       = {11},\n  pages        = {2238},\n  doi          = {10.3390/app9112238},\n  language     = {en},\n  copyright    = {http://creativecommons.org/licenses/by/3.0/},\n  keywords     = {calibration,extrinsic parameter,LiDAR,LRF},\n  abstract     = {Perceiving its environment in 3D is an important\n                  ability for a modern robot. Today, this is often\n                  done using LiDARs which come with a strongly limited\n                  field of view (FOV), however. To extend their FOV,\n                  the sensors are mounted on driving vehicles in\n                  several different ways. This allows 3D perception\n                  even with 2D LiDARs if a corresponding localization\n                  system or technique is available. Another popular\n                  way to gain most information of the scanners is to\n                  mount them on a rotating carrier platform. In this\n                  way, their measurements in different directions can\n                  be collected and transformed into a common frame, in\n                  order to achieve a nearly full spherical\n                  perception. However, this is only possible if the\n                  kinetic chains of the platforms are known exactly,\n                  that is, if the LiDAR pose w.r.t. its rotation\n                  center is well known. The manual measurement of\n                  these chains is often very cumbersome or sometimes\n                  even impossible to do with the necessary\n                  precision. Our paper proposes a method to calibrate\n                  the extrinsic LiDAR parameters by decoupling the\n                  rotation from the full six degrees of freedom\n                  transform and optimizing both separately. Thus, one\n                  error measure for the orientation and one for the\n                  translation with known orientation are minimized\n                  subsequently with a combination of a consecutive\n                  grid search and a gradient descent. Both error\n                  measures are inferred from spherical calibration\n                  targets. Our experiments with the method suggest\n                  that the main influences on the calibration results\n                  come from the distance to the calibration\n                  targets, the accuracy of their center point\n                  estimation and the search grid resolution. However,\n                  our proposed calibration method improves the\n                  extrinsic parameters even with unfavourable\n                  configurations and from inaccurate initial pose\n                  guesses.},\n}\n\n
\n
\n\n\n
\n Perceiving its environment in 3D is an important ability for a modern robot. Today, this is often done using LiDARs which come with a strongly limited field of view (FOV), however. To extend their FOV, the sensors are mounted on driving vehicles in several different ways. This allows 3D perception even with 2D LiDARs if a corresponding localization system or technique is available. Another popular way to gain most information of the scanners is to mount them on a rotating carrier platform. In this way, their measurements in different directions can be collected and transformed into a common frame, in order to achieve a nearly full spherical perception. However, this is only possible if the kinetic chains of the platforms are known exactly, that is, if the LiDAR pose w.r.t. its rotation center is well known. The manual measurement of these chains is often very cumbersome or sometimes even impossible to do with the necessary precision. Our paper proposes a method to calibrate the extrinsic LiDAR parameters by decoupling the rotation from the full six degrees of freedom transform and optimizing both separately. Thus, one error measure for the orientation and one for the translation with known orientation are minimized subsequently with a combination of a consecutive grid search and a gradient descent. Both error measures are inferred from spherical calibration targets. Our experiments with the method suggest that the main influences on the calibration results come from the distance to the calibration targets, the accuracy of their center point estimation and the search grid resolution. However, our proposed calibration method improves the extrinsic parameters even with unfavourable configurations and from inaccurate initial pose guesses.\n
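\n The decoupled scheme can be sketched as a coarse grid search over the orientation followed by a numerical gradient descent on the translation (illustrative Python; the quadratic error functions below are stand-ins, whereas the paper infers its error measures from spherical calibration targets):\n
\n
# Sketch of the two-stage optimization: grid search over orientation, then
# numerical gradient descent on translation. The quadratic errors below are
# stand-ins for the paper's target-based error measures.
import itertools
import numpy as np

def orientation_error(rpy):  # placeholder rotation error measure
    return float(np.sum((np.asarray(rpy) - np.array([0.02, -0.01, 0.1])) ** 2))

def translation_error(t):    # placeholder, orientation held fixed
    return float(np.sum((t - np.array([0.5, 0.0, 0.3])) ** 2))

# Stage 1: coarse grid search over roll/pitch/yaw.
grid = np.linspace(-0.2, 0.2, 21)
best_rpy = min(itertools.product(grid, grid, grid), key=orientation_error)

# Stage 2: gradient descent on translation with central finite differences.
t, step, eps = np.zeros(3), 0.1, 1e-5
for _ in range(200):
    grad = np.array([(translation_error(t + eps * e) -
                      translation_error(t - eps * e)) / (2 * eps)
                     for e in np.eye(3)])
    t -= step * grad

print("orientation:", best_rpy, "translation:", t)
\n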
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An Optimized Method for 3D Body Scanning Applications Based on KinectFusion.\n \n \n \n \n\n\n \n Alhwarin, F.; Schiffer, S.; Ferrein, A.; and Scholl, I.\n\n\n \n\n\n\n In Cliquet Jr., A.; Wiebe, S.; Anderson, P.; Saggio, G.; Zwiggelaar, R.; Gamboa, H.; Fred, A.; and Bermúdez i Badia, S., editor(s), Biomedical Engineering Systems and Technologies, pages 100–113, Cham, 2019. Springer International Publishing\n \n\n\n\n
\n\n\n\n \n \n \"An springer\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Alhwarin:Schiffer:Ferrein:Scholl_BIOIMAGING2018-Book_OptKinFuBodyScan,\n  author       = "Alhwarin, Faraj and Schiffer, Stefan and Ferrein, Alexander and Scholl, Ingrid",\n  title        = "{An Optimized Method for {3D} Body Scanning Applications Based on {KinectFusion}}",\n  booktitle    = "Biomedical Engineering Systems and Technologies",\n  editor       = "Cliquet Jr., Alberto and Wiebe, Sheldon and Anderson, Paul and Saggio, Giovanni and\n                  Zwiggelaar, Reyer and Gamboa, Hugo and Fred, Ana and Berm{\'u}dez i Badia, Sergi",\n  year         = "2019",\n  publisher    = "Springer International Publishing",\n  address      = "Cham",\n  pages        = "100--113",\n  url_springer = {https://link.springer.com/chapter/10.1007/978-3-030-29196-9_6},\n  isbn         = "978-3-030-29196-9",\n  abstract     = "KinectFusion is a powerful method for 3D\n                  reconstruction of indoor scenes. It uses a Kinect\n                  camera and tracks camera motion in real-time by\n                  applying the ICP method on successively captured depth\n                  frames. Then it merges depth frames according to their\n                  positions into a 3D model. Unfortunately the model\n                  accuracy is not sufficient for body scanner\n                  applications because the sensor depth noise affects\n                  the camera motion tracking and deforms the\n                  reconstructed model. In this paper we introduce a\n                  modification of the KinectFusion method for specific\n                  3D body scanning applications. Our idea is based on\n                  the fact that most body scanners are designed so\n                  that the camera trajectory is a fixed circle in the\n                  3D space. Therefore each camera position can be\n                  determined as a rotation angle around a fixed axis\n                  (rotation axis) passing through a fixed point\n                  (rotation center). Because the rotation axis and the\n                  rotation center are always fixed, they can be\n                  estimated offline while filtering out depth noise\n                  through averaging many depth frames. The rotation\n                  angle can also be precisely measured by equipping\n                  the scanner motor with an angle sensor.",\n}\n
\n
\n\n\n
\n KinectFusion is a powerful method for 3D reconstruction of indoor scenes. It uses a Kinect camera and tracks camera motion in real-time by applying the ICP method on successively captured depth frames. Then it merges depth frames according to their positions into a 3D model. Unfortunately the model accuracy is not sufficient for body scanner applications because the sensor depth noise affects the camera motion tracking and deforms the reconstructed model. In this paper we introduce a modification of the KinectFusion method for specific 3D body scanning applications. Our idea is based on the fact that most body scanners are designed so that the camera trajectory is a fixed circle in the 3D space. Therefore each camera position can be determined as a rotation angle around a fixed axis (rotation axis) passing through a fixed point (rotation center). Because the rotation axis and the rotation center are always fixed, they can be estimated offline while filtering out depth noise through averaging many depth frames. The rotation angle can also be precisely measured by equipping the scanner motor with an angle sensor.\n
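\n Under this assumption, each camera pose follows from the measured rotation angle alone, via Rodrigues' formula about the fixed axis and center. A minimal sketch (the axis, center, and angle values are invented):\n
\n
# Sketch: camera pose from a measured angle about a fixed axis through a fixed
# center (Rodrigues' formula). Axis, center, and angle are invented values.
import numpy as np

def pose_from_angle(theta, axis, center):
    """4x4 rigid transform rotating by theta about the line (center, axis)."""
    k = np.asarray(axis, float) / np.linalg.norm(axis)
    K = np.array([[0, -k[2], k[1]], [k[2], 0, -k[0]], [-k[1], k[0], 0]])
    R = np.eye(3) + np.sin(theta) * K + (1 - np.cos(theta)) * (K @ K)
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = np.asarray(center) - R @ np.asarray(center)  # rotate about center
    return T

# e.g. 30 degrees around a vertical axis through the turntable center
print(pose_from_angle(np.pi / 6, axis=[0, 0, 1], center=[0.0, 0.0, 0.8]))
\n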
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2018\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n ERIKA – Early Robotics Introduction at Kindergarten Age.\n \n \n \n \n\n\n \n Schiffer, S.; and Ferrein, A.\n\n\n \n\n\n\n Multimodal Technologies and Interaction, 2(4). 2018.\n \n\n\n\n
\n\n\n\n \n \n \"ERIKAPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{ Schiffer:Ferrein:MTI2018:ERiKA,\n  author       = {Schiffer, Stefan and Ferrein, Alexander},\n  title        = {{ERIKA} -- {Early Robotics Introduction at Kindergarten Age}},\n  journal      = {Multimodal Technologies and Interaction},\n  year         = {2018},\n  volume       = {2},\n  number       = {4},\n  article-number = {64},\n  url          = {http://www.mdpi.com/2414-4088/2/4/64},\n  issn         = {2414-4088},\n  abstract     = {In this work, we report on our attempt to design and\n                  implement an early introduction to basic robotics\n                  principles for children at kindergarten age. One of\n                  the main challenges of this effort is to explain\n                  complex robotics contents in a way that pre-school\n                  children could follow the basic principles and ideas\n                  using examples from their world of experience. What\n                  sets apart our effort from other work is that part\n                  of the lecturing is actually done by a robot itself\n                  and that a quiz at the end of the lesson is done\n                  using robots as well. The humanoid robot Pepper from\n                  SoftBank, which is a great platform for human-robot\n                  interaction experiments, was used to present a\n                  lecture on robotics by reading out the contents to\n                  the children making use of its speech synthesis\n                  capability. A quiz in a Runaround-game-show style\n                  after the lecture activated the children to recap\n                  the contents they acquired about how mobile robots\n                  work in principle. In this quiz, two LEGO Mindstorms\n                  EV3 robots were used to implement a strongly\n                  interactive scenario. Besides the thrill of being\n                  exposed to a mobile robot that would also react to\n                  the children, they were very excited and at the same\n                  time very concentrated. We got very positive\n                  feedback from the children as well as from their\n                  educators. To the best of our knowledge, this is one\n                  of only a few attempts to use a robot like Pepper not\n                  as a tele-teaching tool, but as the teacher itself\n                  in order to engage pre-school children with complex\n                  robotics contents.},\n  doi          = {10.3390/mti2040064},\n}\n\n\n
\n
\n\n\n
\n In this work, we report on our attempt to design and implement an early introduction to basic robotics principles for children at kindergarten age. One of the main challenges of this effort is to explain complex robotics contents in a way that pre-school children could follow the basic principles and ideas using examples from their world of experience. What sets apart our effort from other work is that part of the lecturing is actually done by a robot itself and that a quiz at the end of the lesson is done using robots as well. The humanoid robot Pepper from SoftBank, which is a great platform for human-robot interaction experiments, was used to present a lecture on robotics by reading out the contents to the children making use of its speech synthesis capability. A quiz in a Runaround-game-show style after the lecture activated the children to recap the contents they acquired about how mobile robots work in principle. In this quiz, two LEGO Mindstorms EV3 robots were used to implement a strongly interactive scenario. Besides the thrill of being exposed to a mobile robot that would also react to the children, they were very excited and at the same time very concentrated. We got very positive feedback from the children as well as from their educators. To the best of our knowledge, this is one of only a few attempts to use a robot like Pepper not as a tele-teaching tool, but as the teacher itself in order to engage pre-school children with complex robotics contents.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Proceedings of the Workshop on Teaching Robotics with ROS (held at ERF 2018) (TRROS2018).\n \n \n \n \n\n\n \n Schiffer, S.; Ferrein, A.; Bharatheesha, M.; and Corbato, C. H.,\n editors.\n \n\n\n \n\n\n\n of CEUR Workshop Proceedings. Aachen, 2018.\n \n\n\n\n
\n\n\n\n \n \n \"ProceedingsPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@proceedings{TRROS2018,\n  booktitle    = {Workshop on Teaching Robotics with ROS (held at the European Robotics Forum) (TRROS2018)},\n  title        = {Proceedings of the Workshop on Teaching Robotics with ROS (held at ERF 2018) (TRROS2018)},\n  year         = 2018,\n  editor       = {Stefan Schiffer and Alexander Ferrein and Mukunda Bharatheesha and Carlos Hern{\\'a}ndez Corbato},\n  number       = 2329,\n  series       = {CEUR Workshop Proceedings},\n  address      = {Aachen},\n  issn         = {1613-0073},\n  url          = {http://ceur-ws.org/Vol-2329/},\n  venue        = {Tampere, Finland},\n  eventdate    = {2018-03-15},\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n golog++: An Integrative System Design.\n \n \n \n \n\n\n \n Mataré, V.; Schiffer, S.; and Ferrein, A.\n\n\n \n\n\n\n In Steinbauer, G.; and Ferrein, A., editor(s), Proceedings of the 11th Cognitive Robotics Workshop 2018 (CogRob), of CEUR Workshop Proceedings, pages 29–35, Aachen, 2018. \n \n\n\n\n
\n\n\n\n \n \n \"golog++Paper\n  \n \n \n \"golog++ proc\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Matare:EtAl:CogRob2018:gologpp,\n  title     = {golog++: An Integrative System Design},\n  author    = {Victor Mataré and Stefan Schiffer and Alexander Ferrein},\n  pages     = {29--35},\n  booktitle = {Proceedings of the 11th Cognitive Robotics Workshop 2018 (CogRob)},\n  year      = 2018,\n  editor    = {Gerald Steinbauer and Alexander Ferrein},\n  number    = 2325,\n  series    = {CEUR Workshop Proceedings},\n  address   = {Aachen},\n  issn      = {1613-0073},\n  url       = {http://ceur-ws.org/Vol-2325/#paper-06},\n  url_proc  = {http://ceur-ws.org/Vol-2325/},\n  venue     = {Tempe, AZ, USA},\n  eventdate = {2018-10-27},\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Constraint-based online transformation of abstract plans into executable robot actions.\n \n \n \n \n\n\n \n Hofmann, T.; Mataré, V.; Schiffer, S.; Ferrein, A.; and Lakemeyer, G.\n\n\n \n\n\n\n In Srivastava, S.; Zhang, S.; Hawes, N.; Karpas, E.; Konidaris, G.; Leonetti, M.; Sridharan, M.; and Wyatt, J., editor(s), Proceedings of the 2018 AAAI Spring Symposium on Integrating Representation, Reasoning, Learning, and Execution for Goal Directed Autonomy, pages 549–553, March 2018. \n \n\n\n\n
\n\n\n\n \n \n \"Constraint-based symposium\n  \n \n \n \"Constraint-based session\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 6 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{HofmannEtAl:AAAI-SS2018-SIRLE:ConTrAkt,\n  author       = {Till Hofmann and Victor Matar{\\'e} and Stefan Schiffer and Alexander Ferrein and Gerhard Lakemeyer},\n  title        = {Constraint-based online transformation of abstract plans into executable robot actions},\n  booktitle    = {Proceedings of the 2018 AAAI Spring Symposium on\n                  Integrating Representation, Reasoning, Learning, and Execution for Goal Directed Autonomy},\n  editor       = {Siddharth Srivastava and Shiqi Zhang and Nick Hawes and Erez Karpas\n                  and George Konidaris and Matteo Leonetti and Mohan Sridharan and Jeremy Wyatt},\n  year         = {2018},\n  month        = {March},\n  day          = {26--28},\n  location     = {Stanford University, CA, USA},\n  pages        = {549--553},\n  url_Symposium = {http://siddharthsrivastava.net/sirle18/},\n  url_Session  = {https://aaai.org/Symposia/Spring/sss18symposia.php#ss06},\n  OPTnote         = {to appear},\n  abstract     = {In this paper, we are concerned with making the\n                  execution of abstract action plans for robotic\n                  agents more robust. To this end, we propose to model\n                  the internals of a robot system and its ties to the\n                  actions that the robot can perform. Based on these\n                  models, we propose an online transformation of an\n                  abstract plan into executable actions conforming\n                  with system specifics. With our framework we aim to\n                  achieve two goals. For one, modeling the system\n                  internals is beneficial in its own right in order to\n                  achieve longer term autonomy as well as system\n                  transparency and comprehensibility. For another,\n                  separating the system details from determining the\n                  course of action on an abstract level leverages the\n                  use of planning for actual robotic systems.},\n}\n
\n
\n\n\n
\n In this paper, we are concerned with making the execution of abstract action plans for robotic agents more robust. To this end, we propose to model the internals of a robot system and its ties to the actions that the robot can perform. Based on these models, we propose an online transformation of an abstract plan into executable actions conforming with system specifics. With our framework we aim to achieve two goals. For one, modeling the system internals is beneficial in its own right in order to achieve longer term autonomy as well as system transparency and comprehensibility. For another, separating the system details from determining the course of action on an abstract level leverages the use of planning for actual robotic systems.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Direct Volume Rendering in Virtual Reality.\n \n \n \n\n\n \n Scholl, I.; Suder, S.; and Schiffer, S.\n\n\n \n\n\n\n In Maier, A.; Deserno, T. M.; Handels, H.; Maier-Hein, K. H.; Palm, C.; and Tolxdorff, T., editor(s), Bildverarbeitung für die Medizin 2018, pages 297–302, Berlin, Heidelberg, March 2018. Springer\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{ Scholl:Suder:Schiffer:BVM2018:MedicVR,\n  title        = "{Direct Volume Rendering in Virtual Reality}",\n  author       = "Scholl, Ingrid and Suder, Sebastian and Schiffer, Stefan",\n  editor       = "Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus Hermann and Palm, Christoph and Tolxdorff, Thomas",\n  booktitle    = "Bildverarbeitung f{\"u}r die Medizin 2018",\n  year         = "2018",\n  month        = "March",\n  day          = "10--12",\n  publisher    = "Springer",\n  address      = "Berlin, Heidelberg",\n  pages        = "297--302",\n  isbn         = "978-3-662-56537-7",\n  doi          = "10.1007/978-3-662-56537-7_79",\n  springerlink = "https://link.springer.com/chapter/10.1007/978-3-662-56537-7_79",\n  abstract     = "Direct Volume Rendering (DVR) techniques are used to\n                  visualize surfaces from 3D volume data sets, without\n                  computing a 3D geometry. Several surfaces can be\n                  classified using a transfer function by assigning\n                  optical properties like color and opacity\n                  (RGB$\alpha$) to the voxel data. Finding a good\n                  transfer function in order to separate specific\n                  structures from the volume data set, is in general a\n                  manual and time-consuming procedure, and requires\n                  detailed knowledge of the data and the image\n                  acquisition technique. In this paper, we present a\n                  new Virtual Reality (VR) application based on the\n                  HTC Vive headset. One-dimensional transfer functions\n                  can be designed in VR while continuously rendering\n                  the stereoscopic image pair through massively\n                  parallel GPU-based ray casting shader techniques. The\n                  usability of the VR application is evaluated.",\n}\n\n
\n
\n\n\n
\n Direct Volume Rendering (DVR) techniques are used to visualize surfaces from 3D volume data sets, without computing a 3D geometry. Several surfaces can be classified using a transfer function by assigning optical properties like color and opacity (RGB$α$) to the voxel data. Finding a good transfer function in order to separate specific structures from the volume data set, is in general a manual and time-consuming procedure, and requires detailed knowledge of the data and the image acquisition technique. In this paper, we present a new Virtual Reality (VR) application based on the HTC Vive headset. One-dimensional transfer functions can be designed in VR while continuously rendering the stereoscopic image pair through massively parallel GPU-based ray casting shader techniques. The usability of the VR application is evaluated.\n
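\n The two ingredients named above fit in a few lines: a one-dimensional RGBA lookup table as the transfer function, and front-to-back alpha compositing along each ray. A NumPy sketch standing in for what the paper runs as a GPU shader (the table values are invented):\n
\n
# Sketch: 1D transfer function (intensity -> RGBA lookup table) plus
# front-to-back compositing along one ray. Table values are invented.
import numpy as np

tf = np.zeros((256, 4))                # transfer function as a lookup table
tf[60:120] = [0.8, 0.5, 0.3, 0.02]    # e.g. soft tissue: faint, nearly clear
tf[120:256] = [1.0, 1.0, 0.9, 0.6]    # e.g. bone: bright and mostly opaque

def composite_ray(samples):
    """Front-to-back alpha compositing of intensity samples along a ray."""
    color, alpha = np.zeros(3), 0.0
    for s in samples:
        r, g, b, a = tf[int(s)]
        color += (1.0 - alpha) * a * np.array([r, g, b])
        alpha += (1.0 - alpha) * a
        if alpha > 0.99:               # early ray termination
            break
    return color, alpha

print(composite_ray([10, 80, 80, 200, 200]))
\n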
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimized KinectFusion Algorithm for 3D Scanning Applications.\n \n \n \n \n\n\n \n Alhwarin, F.; Schiffer, S.; Ferrein, A.; and Scholl, I.\n\n\n \n\n\n\n In Proceedings of the 11th International Joint Conference on Biomedical Engineering Systems and Technologies, volume 2: BIOIMAGING, of (BIOSTEC 2018), pages 50–57, 2018. INSTICC, SciTePress\n Best Paper Candidate (Short List)\n\n\n\n
\n\n\n\n \n \n \"Optimized scitepress\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{AlhwarinEtAl:BIOIMAGING2018:OptimizedKinectFusion,\n  author       = {Faraj Alhwarin and Stefan Schiffer and Alexander Ferrein and Ingrid Scholl},\n  title        = {{Optimized KinectFusion Algorithm for 3D Scanning Applications}},\n  booktitle    = {Proceedings of the 11th International Joint Conference on Biomedical Engineering Systems and Technologies},\n  volume       = {2: BIOIMAGING},\n  series       = {(BIOSTEC 2018)},\n  year         = {2018},\n  pages        = {50--57},\n  publisher    = {SciTePress},\n  organization = {INSTICC},\n  doi          = {10.5220/0006594700500057},\n  isbn         = {978-989-758-278-3},\n  url_scitepress = {http://www.scitepress.org/PublicationsDetail.aspx?ID=dZs8lGPb760=&t=1},\n  abstract     = {KinectFusion is an effective way to reconstruct\n                  indoor scenes. It takes a depth image stream and\n                  uses the iterative closest point (ICP) method to\n                  estimate the camera motion. Then it merges the\n                  images in a volume to construct a 3D model. The\n                  model accuracy is not satisfactory for certain\n                  applications such as scanning a human body to\n                  provide information about bone structure health. For\n                  one reason, camera noise and noise in the ICP method\n                  limit the accuracy. For another, the error in\n                  estimating the global camera poses accumulates. In\n                  this paper, we present a method to optimize\n                  KinectFusion for 3D scanning in the above\n                  scenarios. We aim to reduce the noise influence on\n                  camera pose tracking. The idea is as follows: in our\n                  application scenarios we can always assume that\n                  either the camera rotates around the object to be\n                  scanned or that the object rotates in front of the\n                  camera. In both cases, the relative camera/object\n                  pose is located on a 3D-circle. Therefore, camera\n                  motion can be described as a rotation around a fixed\n                  axis passing through a fixed point. Since the axis\n                  and the center of rotation are always fixed, the\n                  error averaging principle can be utilized to reduce\n                  the noise impact and hence to enhance the 3D model\n                  accuracy of the scanned object.},\n  note         = {Best Paper Candidate (Short List)},\n}\n\n
\n
\n\n\n
\n KinectFusion is an effective way to reconstruct indoor scenes. It takes a depth image stream and uses the iterative closest point (ICP) method to estimate the camera motion. Then it merges the images in a volume to construct a 3D model. The model accuracy is not satisfactory for certain applications such as scanning a human body to provide information about bone structure health. For one reason, camera noise and noise in the ICP method limit the accuracy. For another, the error in estimating the global camera poses accumulates. In this paper, we present a method to optimize KinectFusion for 3D scanning in the above scenarios. We aim to reduce the noise influence on camera pose tracking. The idea is as follows: in our application scenarios we can always assume that either the camera rotates around the object to be scanned or that the object rotates in front of the camera. In both cases, the relative camera/object pose is located on a 3D-circle. Therefore, camera motion can be described as a rotation around a fixed axis passing through a fixed point. Since the axis and the center of rotation are always fixed, the error averaging principle can be utilized to reduce the noise impact and hence to enhance the 3D model accuracy of the scanned object.\n
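\n The error-averaging principle can be illustrated directly: with the relative pose fixed, averaging many depth frames per pixel suppresses zero-mean sensor noise before the rotation axis and center are estimated. A sketch on synthetic data standing in for real Kinect frames:\n
\n
# Sketch: per-pixel averaging of many depth frames taken at a fixed relative
# pose suppresses zero-mean sensor noise. Synthetic data, invented noise level.
import numpy as np

rng = np.random.default_rng(0)
true_depth = np.full((240, 320), 1.5)  # metres; a flat scene for simplicity
frames = true_depth + rng.normal(0.0, 0.01, (50, 240, 320))  # ~1 cm noise

mean_depth = frames.mean(axis=0)       # average over 50 frames per pixel
print("noise std before: %.4f m, after: %.4f m" % (
    (frames[0] - true_depth).std(), (mean_depth - true_depth).std()))
\n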
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2017\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n The ROSIN Education Concept: Fostering ROS Industrial-related robotics education in Europe.\n \n \n \n \n\n\n \n Ferrein, A.; Schiffer, S.; and Kallweit, S.\n\n\n \n\n\n\n In Proceedings of the Third Iberian Robotics Conference (ROBOT'2017), November 22-24 2017. \n \n\n\n\n
\n\n\n\n \n \n \"The programme\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{ Ferrein:Schiffer:Kallweit:ROBOT2017:ROSIN-Education-Concept,\n  author       = {Alexander Ferrein and Stefan Schiffer and Stephan Kallweit},\n  title        = {The {ROSIN} Education Concept: Fostering {ROS Industrial}-related robotics education in {Europe}},\n  booktitle    = {Proceedings of the Third Iberian Robotics Conference (ROBOT'2017)},\n  OPTeditor       = {Aníbal Ollero and Alberto Sanfeliu and Luis Montano and Nuno Lau and Carlos Cardeira},\n  OPTvolume       = {},\n  OPTpages        = {XX--YY},\n  year         = {2017},\n  month        = {November 22-24},\n  location     = {Sevilla, Spain},\n  url_Programme = {http://easychair.org/smart-program/ROBOT2017/2017-11-23.html#talk:56928},\n  abstract     = {ROS Industrial (ROS-I) is an effort to deploy the\n                  Robot Operating System (ROS) for industrial\n                  manufacturing applications. The ROS-I activities are\n                  organised by the ROS Industrial consortium\n                  (RIC). With the EU-funded project ROSIN, which\n                  started in 2017, the ROS-I activities are further\n                  supported. The project will give out funds for\n                  developing ROS-I components. As a further important\n                  measure, the ROSIN project focuses on education\n                  measures for training a large number of students and\n                  industry professionals to become specialists in\n                  ROS-I. In this paper, we outline the broad ROSIN\n                  education programme, which consists of a series of\n                  summer schools, a professional academy and intends\n                  to provide the course contents in Massive Open\n                  Online Courses as well.},\n}\n
\n
\n\n\n
\n ROS Industrial (ROS-I) is an effort to deploy the Robot Operating System (ROS) for industrial manufacturing applications. The ROS-I activities are organised by the ROS Industrial consortium (RIC). With the EU-funded project ROSIN, which started in 2017, the ROS-I activities are further supported. The project will give out funds for developing ROS-I components. As a further important measure, the ROSIN project focuses on education measures for training a large number of students and industry professionals to become specialists in ROS-I. In this paper, we outline the broad ROSIN education programme, which consists of a series of summer schools and a professional academy, and which also intends to provide the course contents as Massive Open Online Courses.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n ReVolVR – Rendering Volume Data in VR using HTC Vive.\n \n \n \n \n\n\n \n Suder, S.; Schiffer, S.; and Scholl, I.\n\n\n \n\n\n\n In GTC Europe 2017, October 10-12 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ReVolVRPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{ Suder:Scholl:Schiffer:GTC-Europe2017:ReVolVR,\n  title        = {{ReVolVR} -- Rendering Volume Data in {VR} using {HTC Vive}},\n  author       = {Sebastian Suder and Stefan Schiffer and Ingrid Scholl},\n  booktitle    = {GTC Europe 2017},\n  year         = {2017},\n  month        = {October 10-12},\n  location     = {Munich},\n  ID           = {ID 23055},\n  keywords     = {Virtual Reality, Volume Rendering, HTC Vive},\n  url          = {http://on-demand-gtc.gputechconf.com/gtc-quicklink/PzLIdd},\n  abstract     = {ReVolVR is a new Virtual Reality (VR) volume\n                  rendering application based on the HTC Vive VR\n                  technique. The application uses the ray casting\n                  algorithm for direct volume rendering. Ray casting\n                  needs a transfer function to classify several\n                  surfaces. Finding a good transfer function is in\n                  general a manual and time-consuming procedure and\n                  requires detailed knowledge of the data. With\n                  ReVolVR, the transfer function can be modified in\n                  the virtual scene while the stereoscopic 3D volume\n                  is continuously rendered in real time through a\n                  GPU-based ray casting shader. All interactions are\n                  designed to conveniently reflect the real movements\n                  of the user.},\n}\n\n\n
\n
\n\n\n
\n ReVolVR is a new Virtual Reality (VR) volume rendering application based on the HTC Vive VR technique. The application uses the ray casting algorithm for direct volume rendering. Ray casting needs a transfer function to classify several surfaces. Finding a good transfer function is in general a manual and time-consuming procedure and requires detailed knowledge of the data. With ReVolVR, the transfer function can be modified in the virtual scene while the stereoscopic 3D volume is continuously rendered in real time through a GPU-based ray casting shader. All interactions are designed to conveniently reflect the real movements of the user.\n
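As a rough illustration of ray casting with a transfer function, here is a CPU sketch in Python/NumPy (our own simplification, not the ReVolVR shader code; volume_sample and transfer_function are assumed user-supplied callables):

import numpy as np

def cast_ray(volume_sample, transfer_function, origin, direction,
             n_steps=256, step=1.0):
    # Front-to-back compositing along one ray.
    # volume_sample(p) -> scalar density; transfer_function(s) -> (rgb, alpha).
    color, alpha = np.zeros(3), 0.0
    p = np.asarray(origin, dtype=float)
    d = np.asarray(direction, dtype=float)
    d /= np.linalg.norm(d)
    for _ in range(n_steps):
        rgb, a = transfer_function(volume_sample(p))  # classify the sample
        color += (1.0 - alpha) * a * np.asarray(rgb)
        alpha += (1.0 - alpha) * a
        if alpha > 0.99:  # early ray termination
            break
        p = p + step * d
    return color, alpha

Editing the transfer function in VR then amounts to swapping transfer_function between frames while every ray is recast on the GPU.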
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Decentralised System Approach for Controlling AGVs with ROS.\n \n \n \n \n\n\n \n Walenta, R.; Schellekens, T.; Ferrein, A.; and Schiffer, S.\n\n\n \n\n\n\n In Proceedings of the IEEE AFRICON 2017, September 18-20 2017. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"A conference\n  \n \n \n \"A session\n  \n \n \n \"A programme\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{ Walenta:Schellekens:Ferrein:Schiffer:AFRICON2017:DecentralisedAGVsROS,\n  author       = {Robert Walenta and Twan Schellekens and Alexander Ferrein and Stefan Schiffer},\n  title        = {A Decentralised System Approach for Controlling {AGVs} with {ROS}},\n  booktitle    = {Proceedings of the IEEE AFRICON 2017},\n  OPTeditor       = {Jan Haase and Gerhard Hancke and Albert Lysko},\n  OPTvolume       = {},\n  OPTpages        = {XX--YY},\n  year         = {2017},\n  month        = {September 18-20},\n  location     = {Cape Town, South Africa},\n  publisher    = {IEEE},\n  OPTisbn         = {},\n  OPTissn         = {},\n  url_Conference = {http://africon2017.org/},\n  url_Session    = {http://fftfridays.co.za/afric/program-dsr.html#s101},\n  url_Programme  = {http://fftfridays.co.za/afric/program-dsr.html#p009989},\n  abstract     = {The current industrial state of the art for\n                  automated guided vehicles relies on centralised\n                  controllers dispatching transports to vehicles along\n                  predefined paths.\n                  The design of these paths is time-consuming and has\n                  to be done in advance, followed by an extensive\n                  testing phase. In the field of mobile robotics,\n                  robust path planning and navigation algorithms\n                  exist.  However, they have not yet found their way\n                  into industrial applications in larger numbers.\n                  In this paper, we present a system architecture for\n                  a decentralised control of multiple automated guided\n                  vehicles performing material transportation tasks in\n                  intra-logistic applications which is based on mobile\n                  robotics solutions. The proposed system includes\n                  solutions for self-localisation, behaviour control,\n                  conflict-free routing and motion control. The\n                  non-centralised control of the system architecture\n                  allows for dynamic path planning and traffic\n                  coordination. Its implementation is based on the\n                  Robot Operating System, the de-facto standard\n                  middleware for robotics applications.  We give an\n                  overview of the overall system architecture as well\n                  as the coordination mechanisms and show a first\n                  proof of concept in simulations.},\n}\n
\n
\n\n\n
\n The current industrial state of the art for automated guided vehicles relies on centralised controllers dispatching transports to vehicles along predefined paths. The design of these paths is time-consuming and has to be done in advance, followed by an extensive testing phase. In the field of mobile robotics, robust path planning and navigation algorithms exist. However, they have not yet found their way into industrial applications in larger numbers. In this paper, we present a system architecture for the decentralised control of multiple automated guided vehicles performing material transportation tasks in intra-logistics applications, which is based on mobile robotics solutions. The proposed system includes solutions for self-localisation, behaviour control, conflict-free routing and motion control. The non-centralised control of the system architecture allows for dynamic path planning and traffic coordination. Its implementation is based on the Robot Operating System, the de facto standard middleware for robotics applications. We give an overview of the overall system architecture as well as the coordination mechanisms and show a first proof of concept in simulations.\n
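The abstract does not spell out the conflict-free routing; one common way to picture it is a time-expanded reservation scheme, sketched below in Python (a hypothetical simplification of ours, not the paper's design):

class ReservationTable:
    # Reservations keyed by (cell, time step); in a decentralised setup
    # this state would be replicated among the AGVs, e.g. via ROS topics.
    def __init__(self):
        self.reserved = {}  # (cell, t) -> agv_id

    def try_reserve(self, agv_id, path):
        # Reserve every (cell, t) along the planned path atomically,
        # or fail without side effects so the AGV can replan a detour.
        slots = [(cell, t) for t, cell in enumerate(path)]
        if any(self.reserved.get(s, agv_id) != agv_id for s in slots):
            return False
        for s in slots:
            self.reserved[s] = agv_id
        return True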
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Assisted Feature Engineering and Feature Learning to Build Knowledge-Based Agents for Arcade Games.\n \n \n \n\n\n \n Andelefski, B.; and Schiffer, S.\n\n\n \n\n\n\n In van den Herik, J.; Rocha, A. P.; and Filipe, J., editor(s), ICAART 2017 - Proceedings of the 4th International Conference on Agents and Artificial Intelligence, volume 2 - Artificial Intelligence, pages 228–238, February 6-8 2017. SciTePress\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{ Andelefski:Schiffer:ICAART2017:AssistedFeatureLearning,\n  author       = {Bastian Andelefski and Stefan Schiffer},\n  title        = {Assisted Feature Engineering and Feature Learning to Build Knowledge-Based Agents for Arcade Games},\n  year         = {2017},\n  pages        = {228--238},\n  editor       = {Jaap van den Herik  and Ana Paula Rocha and Joaquim Filipe},\n  booktitle    = {ICAART 2017 - Proceedings of the 4th International Conference on Agents and Artificial Intelligence},\n  volume       = {2 - Artificial Intelligence},\n  location     = {Porto, Portugal},\n  month        = {February 6-8},\n  publisher    = {SciTePress},\n  isbn         = {978-989-758-220-2},\n  abstract     = {Human knowledge can greatly increase the performance\n                  of autonomous agents. Leveraging this knowledge is\n                  sometimes neither straightforward nor easy. In this\n                  paper, we present an approach for assisted feature\n                  engineering and feature learning to build\n                  knowledge-based agents for three arcade games within\n                  the Arcade Learning Environment. While existing\n                  approaches mostly use model-free approaches we aim\n                  at creating a descriptive set of features for world\n                  modelling and building agents. To this end, we\n                  provide (visual) assistance in identifying and\n                  modelling features from RAM, we allow for learning\n                  features based on labeled game data, and we allow\n                  for creating basic agents using the above\n                  features. In our evaluation, we compare different\n                  methods to learn features from the RAM. We then\n                  compare several agents using different sets of\n                  manual and learned features with one another and\n                  with the state-of-the-art.},\n}\n
\n
\n\n\n
\n Human knowledge can greatly increase the performance of autonomous agents. Leveraging this knowledge is sometimes neither straightforward nor easy. In this paper, we present an approach for assisted feature engineering and feature learning to build knowledge-based agents for three arcade games within the Arcade Learning Environment. While existing approaches are mostly model-free, we aim at creating a descriptive set of features for world modelling and building agents. To this end, we provide (visual) assistance in identifying and modelling features from RAM, we allow for learning features based on labeled game data, and we allow for creating basic agents using the above features. In our evaluation, we compare different methods to learn features from the RAM. We then compare several agents using different sets of manual and learned features with one another and with the state of the art.\n
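The "learning features based on labeled game data" part can be pictured as plain supervised learning from the 128 Atari RAM bytes to a hand-labeled feature value; below is a minimal, self-contained sketch with synthetic data (illustrative only, using scikit-learn rather than the authors' tooling):

import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
ram = rng.integers(0, 256, size=(500, 128)).astype(float)  # stand-in for recorded RAM
labels = ram[:, 42]  # pretend byte 42 encodes the labeled feature (e.g. a position)

model = LinearRegression().fit(ram, labels)  # learn the RAM -> feature mapping
print(model.predict(ram[:1]))  # read the feature off a new RAM snapshot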
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2016\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Why it is harder to run RoboCup in South Africa: Experiences from German South African collaborations.\n \n \n \n \n\n\n \n Ferrein, A.; Schiffer, S.; Booysen, T.; and Stopforth, R.\n\n\n \n\n\n\n International Journal of Advanced Robotic Systems, 13(5). 2016.\n \n\n\n\n
\n\n\n\n \n \n \"WhyPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{ Ferrein:Schiffer:Booysen:Stopforth:IJARS2016:RoboCup-SA-DE,\n  author       = {Ferrein, Alexander and Schiffer, Stefan and Booysen, Tracy and Stopforth, Riaan}, \n  title        = {Why it is harder to run {RoboCup} in {South Africa}: Experiences from {German} {South African} collaborations},\n  journal      = {International Journal of Advanced Robotic Systems},\n  volume       = {13}, \n  number       = {5}, \n  year         = {2016}, \n  doi          = {10.1177/1729881416662789}, \n  URL          = {http://arx.sagepub.com/content/13/5/1729881416662789.abstract}, \n  eprint       = {http://arx.sagepub.com/content/13/5/1729881416662789.full.pdf+html}, \n  abstract     = {Robots are widely used as a vehicle to spark\n                  interest in science and technology in learners. A\n                  number of initiatives focus on this issue, for\n                  instance, the Roberta Initiative, the FIRST Lego\n                  League, the World Robot Olympiad and RoboCup\n                  Junior. Robotic competitions are valuable not only\n                  for school learners but also for university\n                  students, as the RoboCup initiative shows. Besides\n                  technical skills, the students get some project\n                  exposure and experience what it means to finish\n                  their tasks on time. But qualifying students for\n                  future high-tech areas should not only be for\n                  students from developed countries. In this article,\n                  we present our experiences with research and\n                  education in robotics within the RoboCup initiative,\n                  in Germany and South Africa; we report on our\n                  experiences with trying to get the RoboCup\n                  initiative in South Africa going. RoboCup has a huge\n                  support base of academic institutions in Germany;\n                  this is not the case in South Africa. We present our\n                  ‘north–south’ collaboration initiatives in RoboCup\n                  between Germany and South Africa and discuss some of\n                  the reasons why we think it is harder to run RoboCup\n                  in South Africa.}, \n}\n\n
\n
\n\n\n
\n Robots are widely used as a vehicle to spark interest in science and technology in learners. A number of initiatives focus on this issue, for instance, the Roberta Initiative, the FIRST Lego League, the World Robot Olympiad and RoboCup Junior. Robotic competitions are valuable not only for school learners but also for university students, as the RoboCup initiative shows. Besides technical skills, the students get some project exposure and experience what it means to finish their tasks on time. But qualifying students for future high-tech areas should not be reserved for students from developed countries. In this article, we present our experiences with research and education in robotics within the RoboCup initiative, in Germany and South Africa; we report on our experiences with trying to get the RoboCup initiative in South Africa going. RoboCup has a huge support base of academic institutions in Germany; this is not the case in South Africa. We present our ‘north–south’ collaboration initiatives in RoboCup between Germany and South Africa and discuss some of the reasons why we think it is harder to run RoboCup in South Africa.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Rotating Platform for Swift Acquisition of Dense 3D Point Clouds.\n \n \n \n \n\n\n \n Neumann, T.; Dülberg, E.; Schiffer, S.; and Ferrein, A.\n\n\n \n\n\n\n In Kubota, N.; Kiguchi, K.; Liu, H.; and Obo, T., editor(s), Proceedings of the 9th International Conference on Intelligent Robotics and Applications (ICIRA 2016), Part I, pages 257–268, Cham, August 22-24 2016. Springer International Publishing\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n \n \"A spinger\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 8 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Inproceedings{ Neumann:Duellberg:Schiffer:Ferrein:ICIRA2016:RotatingPlatform,\n  author       = "Neumann, Tobias and D{\"u}lberg, Enno and Schiffer, Stefan and Ferrein, Alexander",\n  title        = "A Rotating Platform for Swift Acquisition of Dense 3D Point Clouds",\n  booktitle    = "Proceedings of the 9th International Conference on Intelligent Robotics and Applications (ICIRA 2016), Part I",\n  editor       = "Kubota, Naoyuki and Kiguchi, Kazuo and Liu, Honghai and Obo, Takenori",\n  year         = "2016",\n  month        = "August 22-24",\n  location     = "Tokyo, Japan",\n  publisher    = "Springer International Publishing",\n  address      = "Cham",\n  pages        = "257--268",\n  isbn         = "978-3-319-43506-0",\n  doi          = "10.1007/978-3-319-43506-0_22",\n  url          = "http://dx.doi.org/10.1007/978-3-319-43506-0_22",\n  url_Springer = "https://link.springer.com/chapter/10.1007/978-3-319-43506-0_22",\n  abstract     = "For mapping with mobile robots the fast acquisition\n                  of dense point clouds is important. Different sensor\n                  techniques and devices exist for different\n                  applications. In this paper, we present a novel\n                  platform for rotating 3D and 2D LiDAR sensors. It\n                  allows for swiftly capturing 3D scans that are\n                  densely populated and that almost cover a full\n                  sphere. While the platform design is generic and\n                  many common LRF can be mounted on it, in our setup\n                  we use a Velodyne VLP-16 PUCK LiDAR as well as a\n                  Hokuyo UTM-30LX-EW LRF to acquire distance\n                  measurements. We describe the hardware design as\n                  well as the control software. We further compare our\n                  system with other existing commercial and\n                  non-commercial designs, especially with the FARO\n                  Focus3D X 130.",\n}\n\n
\n
\n\n\n
\n For mapping with mobile robots, the fast acquisition of dense point clouds is important. Different sensor techniques and devices exist for different applications. In this paper, we present a novel platform for rotating 3D and 2D LiDAR sensors. It allows for swiftly capturing 3D scans that are densely populated and that almost cover a full sphere. While the platform design is generic and many common laser range finders (LRFs) can be mounted on it, in our setup we use a Velodyne VLP-16 PUCK LiDAR as well as a Hokuyo UTM-30LX-EW LRF to acquire distance measurements. We describe the hardware design as well as the control software. We further compare our system with other existing commercial and non-commercial designs, especially with the FARO Focus3D X 130.\n
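How a rotating 2D LRF yields dense 3D data can be sketched in a few lines of Python/NumPy; the axis conventions below are our own assumptions for illustration, not the platform's actual calibration:

import numpy as np

def scan_to_points(ranges, beam_angles, platform_angle):
    # One planar scan, taken in the sensor's x-z plane, is lifted to 3D
    # by rotating it about the platform's (here: z) axis.
    x = ranges * np.cos(beam_angles)
    z = ranges * np.sin(beam_angles)
    c, s = np.cos(platform_angle), np.sin(platform_angle)
    R = np.array([[c, -s, 0.0],
                  [s,  c, 0.0],
                  [0.0, 0.0, 1.0]])
    return (R @ np.vstack([x, np.zeros_like(x), z])).T  # (n_beams, 3)

Accumulating such slices over a full platform revolution gives the near-spherical point cloud.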
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A System Layout for Cognitive Service Robots.\n \n \n \n \n\n\n \n Schiffer, S.; and Ferrein, A.\n\n\n \n\n\n\n In Chrisley, R.; Müller, V. C.; Sandamirskaya, Y.; and Vincze, M., editor(s), Proceedings of EUCognition 2016 Cognitive Robot Architectures, volume 1855, of CEUR Workshop Proceedings, pages 44–45, December 8-9 2016. CEUR-WS.org\n \n\n\n\n
\n\n\n\n \n \n \"A pdf\n  \n \n \n \"A ceur-ws\n  \n \n \n \"A dblpconf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{ SchifferFerrein:EUCognition2016:CognitiveSystemLayout,\n  author       = {Stefan Schiffer and Alexander Ferrein},\n  title        = {A System Layout for Cognitive Service Robots},\n  booktitle    = {Proceedings of EUCognition 2016 Cognitive Robot Architectures},\n  editor       = {Ron Chrisley and Vincent C. M{\"{u}}ller and Yulia Sandamirskaya and Markus Vincze},\n  OPTbooktitle    = {Cognitive Robot Architectures, Proceedings of EUCognition 2016 European Association for Cognitive Systems, Vienna, 8-9 December, 2016},\n  pages        = {44--45},\n  year         = {2016},\n  month        = {December 8-9},\n  location     = {Vienna},\n  organizer    = {European Association for Cognitive Systems},\n  series       = {{CEUR} Workshop Proceedings},\n  volume       = {1855},\n  publisher    = {CEUR-WS.org},\n  url_PDF      = {http://ceur-ws.org/Vol-1855/EUCognition_2016_Part11.pdf},\n  url_CEUR-WS  = {http://ceur-ws.org/Vol-1855},\n  url_DBLPconf = {http://dblp.uni-trier.de/db/conf/eucognition/eucognition2016.html},\n  bib_DBLP     = {http://dblp.org/rec/bib/conf/eucognition/0002F16},\n  bib_DBLPconf = {http://dblp.org/rec/bib/conf/eucognition/2016},\n  urn          = {urn:nbn:de:0074-1855-C},\n  abstract     = {In this paper we discuss a system layout for\n                  cognitive service robots. The goal is to sketch\n                  components and their interplay needed for\n                  cognitive robotics as introduced by Ray Reiter. We\n                  are particularly interested in applications in\n                  domestic service robotics where we focus on\n                  integrating qualitative reasoning and human-robot\n                  interaction. The overall objective is to build and\n                  maintain a knowledge-based system and agent\n                  specification.},\n}\n\n\n
\n
\n\n\n
\n In this paper we discuss a system layout for cognitive service robots. The goal is to sketch components and their interplay needed for cognitive robotics as introduced by Ray Reiter. We are particularly interested in applications in domestic service robotics where we focus on integrating qualitative reasoning and human-robot interaction. The overall objective is to build and maintain a knowledge-based system and agent specification.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Decision-Theoretic Planning with Fuzzy Notions in GOLOG.\n \n \n \n \n\n\n \n Schiffer, S.; and Ferrein, A.\n\n\n \n\n\n\n International Journal of Uncertainty, Fuzziness and Knowledge-Based Systems, 24: 123–143. 2016.\n \n\n\n\n
\n\n\n\n \n \n \"Decision-TheoreticPaper\n  \n \n \n \"Decision-Theoretic pdf\n  \n \n \n \"Decision-Theoretic pdfplus\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{ Schiffer:Ferrein:IJUFKS2016:DTPlanningFuzzyNotions,\n  author       = {Stefan Schiffer and Alexander Ferrein},\n  title        = {{Decision-Theoretic Planning with Fuzzy Notions in GOLOG}},\n  journal      = {International Journal of Uncertainty, Fuzziness and Knowledge-Based Systems},\n  volume       = {24},\n  OPTnumber       = {Suppl. 2},\n  pages        = {123--143},\n  year         = {2016},\n  doi          = {10.1142/S0218488516400134},\n  url          = {http://www.worldscientific.com/doi/abs/10.1142/S0218488516400134},\n  url_PDF      = {http://www.worldscientific.com/doi/pdf/10.1142/S0218488516400134},\n  url_PDFplus  = {http://www.worldscientific.com/doi/pdfplus/10.1142/S0218488516400134},\n  abstract     = {In this paper we present an extension of the action\n                  language Golog that allows for using fuzzy notions\n                  in non-deterministic argument choices and the reward\n                  function in decision-theoretic planning. Often, in\n                  decision-theoretic planning, it is cumbersome to\n                  specify the set of values to pick from in the\n                  non-deterministic-choice-of-argument\n                  statement. Also, even for domain experts, it is not\n                  always easy to specify a reward function. Instead of\n                  providing a finite domain for values in the\n                  non-deterministic-choice-of-argument statement in\n                  Golog, we now allow for stating the argument domain\n                  by simply providing a formula over linguistic terms\n                  and fuzzy fluents. In Golog’s forward-search DT\n                  planning algorithm, these formulas are evaluated in\n                  order to find the agent’s optimal policy. We\n                  illustrate this in the Diner Domain where the agent\n                  needs to calculate the optimal serving order.},\n}
\n
\n\n\n
\n In this paper we present an extension of the action language Golog that allows for using fuzzy notions in non-deterministic argument choices and the reward function in decision-theoretic planning. Often, in decision-theoretic planning, it is cumbersome to specify the set of values to pick from in the non-deterministic-choice-of-argument statement. Also, even for domain experts, it is not always easy to specify a reward function. Instead of providing a finite domain for values in the non-deterministic-choice-of-argument statement in Golog, we now allow for stating the argument domain by simply providing a formula over linguistic terms and fuzzy fluents. In Golog’s forward-search DT planning algorithm, these formulas are evaluated in order to find the agent’s optimal policy. We illustrate this in the Diner Domain where the agent needs to calculate the optimal serving order.\n
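A minimal picture of such linguistic terms, in Python (the terms, domain and formula are invented for illustration and are not from the paper): fuzzy membership functions define the terms, and a formula over them scores the candidate argument values.

def triangular(a, b, c):
    # Membership function of a triangular fuzzy set over a numeric domain.
    def mu(x):
        if x <= a or x >= c:
            return 0.0
        return (x - a) / (b - a) if x <= b else (c - x) / (c - b)
    return mu

terms = {"near": triangular(0.0, 0.5, 2.0),   # linguistic terms over a
         "far":  triangular(1.0, 4.0, 8.0)}   # hypothetical distance domain

# Degree to which each candidate satisfies "near and not far";
# the planner would prefer candidates with the highest degree.
candidates = [0.25, 1.5, 3.0, 6.0]
degree = {x: min(terms["near"](x), 1.0 - terms["far"](x)) for x in candidates}
best = max(degree, key=degree.get)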
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Integrating Qualitative Reasoning and Human-Robot Interaction in Domestic Service Robotics.\n \n \n \n \n\n\n \n Schiffer, S.\n\n\n \n\n\n\n KI - Künstliche Intelligenz, 30(3): 257–265. 2016.\n \n\n\n\n
\n\n\n\n \n \n \"IntegratingPaper\n  \n \n \n \"Integrating dblpbib\n  \n \n \n \"Integrating springer\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{ Schiffer:KI2016:Integrating,\n  author       = "Stefan Schiffer",\n  title        = "Integrating Qualitative Reasoning and Human-Robot Interaction in Domestic Service Robotics",\n  journal      = {{KI} - K{\\"u}nstliche Intelligenz},\n  year         = "2016",\n  volume       = "30",\n  number       = "3",\n  pages        = "257--265",\n  issn         = "1610-1987",\n  doi          = "10.1007/s13218-016-0436-x",\n  url          = "http://dx.doi.org/10.1007/s13218-016-0436-x",\n  url_DBLPbib  = {http://dblp.org/rec/bib/journals/ki/Schiffer16},\n  url_Springer = {https://link.springer.com/article/10.1007/s13218-016-0436-x},\n  abstract     = "In this paper we discuss a system layout for\n                  cognitive service robots and our implementation of\n                  such a system. Our focus is on integrating\n                  qualitative reasoning and human-robot\n                  interaction. After introducing the domestic service\n                  robotics domain with its challenges and the\n                  RoboCup@Home initiative we present our robot\n                  platform, its basic capabilities and its high-level\n                  reasoning system. Then, we discuss a system layout\n                  for a cognitive service robot in domestic domains,\n                  and we show how components of our service robot\n                  implement elements of such a system layout. We\n                  discuss strengths and limitations of these\n                  components and of the overall system.",\n}\n\n
\n
\n\n\n
\n In this paper we discuss a system layout for cognitive service robots and our implementation of such a system. Our focus is on integrating qualitative reasoning and human-robot interaction. After introducing the domestic service robotics domain with its challenges and the RoboCup@Home initiative we present our robot platform, its basic capabilities and its high-level reasoning system. Then, we discuss a system layout for a cognitive service robot in domestic domains, and we show how components of our service robot implement elements of such a system layout. We discuss strengths and limitations of these components and of the overall system.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2015\n \n \n (7)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n A Local Planner for Ackermann-Driven Vehicles in ROS SBPL.\n \n \n \n \n\n\n \n Limpert, N.; Schiffer, S.; and Ferrein, A.\n\n\n \n\n\n\n In Proceedings of the 8th International Conference on Pattern Recognition Association of South Africa and Robotics and Mechatronics (PRASA-RobMech 2015), pages 172–177, November 26–27 2015. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"A doi\n  \n \n \n \"A ieee\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Limpert:Schiffer:Ferrein:RobMech2015:SBPL,\n  author       = {Nicolas Limpert and Stefan Schiffer and Alexander Ferrein},\n  title        = {A Local Planner for {Ackermann}-Driven Vehicles in {ROS} {SBPL}},\n  booktitle    = {Proceedings of the 8th International Conference on  Pattern Recognition Association of South Africa and Robotics and Mechatronics (PRASA-RobMech 2015)},\n  OPTeditor    = {},\n  pages        = {172--177},\n  year         = {2015},\n  month        = "November 26--27",\n  location     = "Port Elizabeth, South Africa",\n  publisher    = {IEEE},\n  OPTaddress   = {},\n  ISBN_Electr  = {978-1-4673-7450-7},\n  ISBN_USB     = {978-1-4673-7449-1},\n  ISBN_PoD     = {978-1-4673-7451-4},\n  INSPEC       = {15668799},\n  doi          = {10.1109/RoboMech.2015.7359518},\n  url_DOI      = {http://dx.doi.org/10.1109/RoboMech.2015.7359518},\n  url_IEEE     = {http://ieeexplore.ieee.org/document/7359518/},\n  abstract     = {For a mobile service robot, safe navigation is\n                  essential.  To do so, the robot needs to be equipped\n                  with collision avoidance and global path planning\n                  capabilities.  The target platforms considered in\n                  this paper are Ackermann-driven vehicles.  Hence, a\n                  planning approach which directly takes the kinematic\n                  and dynamic constraints of the vehicle into account\n                  is required.  The Search-based Planning Library\n                  (SBPL) package of the Robot Operating System (ROS)\n                  provides global path planning which takes these\n                  constraints into account.  However, it misses a\n                  local planner that can also make use of the\n                  Ackermann kinematic constraints for collision\n                  avoidance.  A local planner is useful to take\n                  dynamic obstacles into account as early as possible.\n                  In this paper, we extend the SBPL included in ROS by\n                  a local planner, which makes use of motion\n                  primitives.  We extend the ROS package and show\n                  first experimental results on Ackermann-driven\n                  vehicles.},\n}\n
\n
\n\n\n
\n For a mobile service robot, safe navigation is essential. To do so, the robot needs to be equipped with collision avoidance and global path planning capabilities. The target platforms considered in this paper are Ackermann-driven vehicles. Hence, a planning approach which directly takes the kinematic and dynamic constraints of the vehicle into account is required. The Search-based Planning Library (SBPL) package of the Robot Operating System (ROS) provides global path planning which takes these constraints into account. However, it lacks a local planner that can also make use of the Ackermann kinematic constraints for collision avoidance. A local planner is useful to take dynamic obstacles into account as early as possible. In this paper, we extend the SBPL included in ROS with a local planner, which makes use of motion primitives. We extend the ROS package and show first experimental results on Ackermann-driven vehicles.\n
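Motion primitives for an Ackermann platform are typically short arcs of constant steering; here is a rough sketch in Python (a bicycle-model approximation of ours, not the actual ROS package code):

import math

def ackermann_primitives(wheelbase, v, dt, steering_angles, n_steps=10):
    # Each primitive is a short constant-steering arc; the bicycle model
    # gives the curvature as kappa = tan(delta) / wheelbase.
    primitives = []
    for delta in steering_angles:  # e.g. [-0.4, -0.2, 0.0, 0.2, 0.4] radians
        x = y = theta = 0.0
        path = [(x, y, theta)]
        for _ in range(n_steps):
            theta += v * math.tan(delta) / wheelbase * dt
            x += v * math.cos(theta) * dt
            y += v * math.sin(theta) * dt
            path.append((x, y, theta))
        primitives.append(path)
    return primitives

The local planner can then score each primitive against the costmap and pick a collision-free one, which respects the steering limits by construction.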
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Video Conference Tool Robot ViCToR.\n \n \n \n \n\n\n \n Goeckel, T.; Schiffer, S.; Wagner, H.; and Lakemeyer, G.\n\n\n \n\n\n\n In Liu, H.; Kubota, N.; Zhu, X.; Dillmann, R.; and Zhou, D., editor(s), Intelligent Robotics and Applications, volume 9245, of Lecture Notes in Computer Science, pages 61–73. Springer International Publishing, 2015.\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n \n \"The springer\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{ Goeckel:Schiffer:EtAl:ICIRA2015:ViCToR,\n  author       = {Goeckel, Tom and Schiffer, Stefan and Wagner, Hermann and Lakemeyer, Gerhard},\n  title        = {The Video Conference Tool Robot {ViCToR}},\n  booktitle    = {Intelligent Robotics and Applications},\n  year         = {2015},\n  isbn         = {978-3-319-22875-4},\n  volume       = {9245},\n  series       = {Lecture Notes in Computer Science},\n  editor       = {Liu, Honghai and Kubota, Naoyuki and Zhu, Xiangyang and Dillmann, R{\\"u}diger and Zhou, Dalin},\n  doi          = {10.1007/978-3-319-22876-1_6},\n  url          = {http://dx.doi.org/10.1007/978-3-319-22876-1_6},\n  url_Springer = {https://link.springer.com/chapter/10.1007\\%2F978-3-319-22876-1_6},\n  publisher    = {Springer International Publishing},\n  pages        = {61--73},\n  abstract     = {We present a robotic tool that autonomously follows\n                  a conversation to enable remote presence in video\n                  conferencing. When humans participate in a meeting\n                  with the help of video conferencing tools, it is\n                  crucial that they are able to follow the\n                  conversation both with acoustic and visual input. To\n                  this end, we design and implement a video\n                  conferencing tool robot that uses binaural sound\n                  source localization as its main source to\n                  autonomously orient towards the currently talking\n                  speaker. To increase robustness of the acoustic cue\n                  against noise we supplement the sound localization\n                  with a source detection stage. Also, we include a\n                  simple onset detector to retain fast response\n                  times. Since we only use two microphones, we are\n                  confronted with ambiguities on whether a source is\n                  in front or behind the device. We resolve these\n                  ambiguities with the help of face detection and\n                  additional moves. We tailor the system to our target\n                  scenarios in experiments with a four minute scripted\n                  conversation. In these experiments we evaluate the\n                  influence of different system settings on the\n                  responsiveness and accuracy of the device.},\n}\n
\n
\n\n\n
\n We present a robotic tool that autonomously follows a conversation to enable remote presence in video conferencing. When humans participate in a meeting with the help of video conferencing tools, it is crucial that they are able to follow the conversation both with acoustic and visual input. To this end, we design and implement a video conferencing tool robot that uses binaural sound source localization as its main source to autonomously orient towards the currently talking speaker. To increase robustness of the acoustic cue against noise we supplement the sound localization with a source detection stage. Also, we include a simple onset detector to retain fast response times. Since we only use two microphones, we are confronted with ambiguities on whether a source is in front or behind the device. We resolve these ambiguities with the help of face detection and additional moves. We tailor the system to our target scenarios in experiments with a four minute scripted conversation. In these experiments we evaluate the influence of different system settings on the responsiveness and accuracy of the device.\n
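The binaural cue boils down to the interaural time difference (ITD); a minimal sketch (Python/NumPy, with an assumed microphone spacing, not the ViCToR implementation) estimates the azimuth from the cross-correlation lag between the two channels:

import numpy as np

def itd_azimuth(left, right, fs, mic_distance=0.2, c=343.0):
    # Lag (in samples) that best aligns the two channels -> ITD -> azimuth.
    # With only two microphones the front/back ambiguity remains, which is
    # why the paper adds face detection and additional moves.
    corr = np.correlate(left, right, mode="full")
    lag = np.argmax(corr) - (len(right) - 1)
    itd = lag / fs
    sin_az = np.clip(itd * c / mic_distance, -1.0, 1.0)
    return np.arcsin(sin_az)  # radians in [-pi/2, pi/2]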
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Akbaba – An Agent for the Angry Birds AI Challenge Based on Search and Simulation.\n \n \n \n\n\n \n Schiffer, S.; Jourenko, M.; and Lakemeyer, G.\n\n\n \n\n\n\n IEEE Transactions on Computational Intelligence and AI in Games, PP(99): 1–12. Sep 2015.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@Article{ Schiffer:Jourenko:Lakemeyer:TCIAIG2015SIPBSG:Akbaba,\n  title      = {{Akbaba} -- An Agent for the {Angry Birds} {AI} {Challenge} Based on Search and Simulation},\n  author     = {Schiffer, Stefan and Jourenko, Maxim and Lakemeyer, Gerhard},\n  journal    = {IEEE Transactions on Computational Intelligence and AI in Games},\n  year       = {2015},\n  month      = {Sep},\n  volume     = {PP},\n  number     = {99},\n  pages      = {1--12},\n  keywords   = {Artificial intelligence; Angry Birds; Computational modeling; Physics Engines; Games;Search; Simulation},\n  doi        = {10.1109/TCIAIG.2015.2478703},\n  ISSN       = {1943-068X},\n  abstract   = {We report on our entry for the AI Birds competition,\n                where we designed, implemented and evaluated an agent\n                for the physics puzzle computer game Angry Birds. Our\n                agent uses search and simulation to find appropriate\n                parameters for launching birds. While there are other\n                methods that focus on qualitative reasoning about\n                physical systems we try to combine simulation and\n                adjustable abstractions to efficiently traverse the\n                possibly infinite search space. The agent features a\n                hierarchical search scheme where different levels of\n                abstractions are used. At any level, it uses\n                simulation to rate subspaces that should be further\n                explored in more detail on the next levels. We\n                evaluate single components of our agent and we also\n                compare the overall performance of different versions\n                of our agent. We show that our approach yields a\n                competitive solution on the standard set of levels.},\n}\n
\n
\n\n\n
\n We report on our entry for the AI Birds competition, where we designed, implemented and evaluated an agent for the physics puzzle computer game Angry Birds. Our agent uses search and simulation to find appropriate parameters for launching birds. While there are other methods that focus on qualitative reasoning about physical systems we try to combine simulation and adjustable abstractions to efficiently traverse the possibly infinite search space. The agent features a hierarchical search scheme where different levels of abstractions are used. At any level, it uses simulation to rate subspaces that should be further explored in more detail on the next levels. We evaluate single components of our agent and we also compare the overall performance of different versions of our agent. We show that our approach yields a competitive solution on the standard set of levels.\n
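The hierarchical search scheme can be illustrated with a coarse-to-fine search over one launch parameter (a toy sketch of ours; score stands in for a physics simulation rating a shot):

def coarse_to_fine(score, lo, hi, levels=3, samples=9, keep=3):
    # Sample each interval coarsely, rate the samples by simulation,
    # then refine around the best-rated ones on the next level.
    intervals = [(lo, hi)]
    best_p, best_v = lo, float("-inf")
    for _ in range(levels):
        rated = []
        for a, b in intervals:
            step = (b - a) / (samples - 1)
            for i in range(samples):
                p = a + i * step
                v = score(p)  # one simulated shot with parameter p
                if v > best_v:
                    best_p, best_v = p, v
                rated.append((v, p, step))
        rated.sort(reverse=True)
        intervals = [(p - s, p + s) for _, p, s in rated[:keep]]
    return best_p, best_v

# e.g. coarse_to_fine(lambda angle: -(angle - 0.7) ** 2, 0.0, 1.57)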
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Abstracting Away Low-Level Details in Service Robotics with Fuzzy Fluents.\n \n \n \n\n\n \n Schiffer, S.; Ferrein, A.; and Lakemeyer, G.\n\n\n \n\n\n\n In Model-Driven Knowledge Engineering for Improved Software Modularity in Robotics and Automation, Workshop at European Robotics Forum 2015, pages 7–10, March 11–13 2015. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Schiffer:etAl:MDKE2015:FuzzyAway,\n  author       = {Stefan Schiffer and Alexander Ferrein and Gerhard Lakemeyer},\n  title        = {{Abstracting Away Low-Level Details\n                   in Service Robotics with Fuzzy Fluents}},\n  booktitle    = {Model-Driven Knowledge Engineering for Improved\n                  Software Modularity in Robotics and Automation,\n                  Workshop at European Robotics Forum 2015},\n  editor       = {Ulrike Thomas and Klas Nilsson and Bernhard Rumpe and Andreas Wortmann},\n  location     = {Vienna, Austria},\n  year         = {2015},\n  month        = {March 11--13},\n  OPTorganization = {ERF},\n  pages        = {7--10},\n  isbn         = {},\n  abstract     = {In domestic service robotic applications, complex\n                  tasks have to be fulfilled in close collaboration\n                  with humans. We try to integrate qualitative\n                  reasoning and human-robot interaction by bridging\n                  the gap in human and robot representations and by\n                  enabling the seamless integration of human notions\n                  in the robot’s high-level control. The developed\n                  methods can also be used to abstract away low-level\n                  details of specific robot platforms. These low-level\n                  details often pose a problem in re-using software\n                  components and applying the same programs and\n                  methods in different contexts. When combined with\n                  methods for self-maintenance developed earlier these\n                  abstractions also allow for seamlessly increasing\n                  the robustness and resilience of different robotic\n                  systems with only little effort.},\n}\n\n
\n
\n\n\n
\n In domestic service robotic applications, complex tasks have to be fulfilled in close collaboration with humans. We try to integrate qualitative reasoning and human-robot interaction by bridging the gap in human and robot representations and by enabling the seamless integration of human notions in the robot’s high-level control. The developed methods can also be used to abstract away low-level details of specific robot platforms. These low-level details often pose a problem in re-using software components and applying the same programs and methods in different contexts. When combined with methods for self-maintenance developed earlier these abstractions also allow for seamlessly increasing the robustness and resilience of different robotic systems with only little effort.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Decision-Theoretic Planning with Linguistic Terms in Golog.\n \n \n \n \n\n\n \n Schiffer, S.; and Ferrein, A.\n\n\n \n\n\n\n In Díaz, I.; Ralescu, A.; and Schiffer, S., editor(s), Proceedings of the Workshop on Fuzzy Logic in AI (FLinAI) 2015, co-located with the 24th International Joint Conference on Artificial Intelligence (IJCAI 2015), volume 1424, of CEUR Workshop Proceedings, July 25 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Decision-Theoretic pdf\n  \n \n \n \"Decision-Theoretic conf\n  \n \n \n \"Decision-Theoretic dblp bib\n  \n \n \n \"Decision-Theoretic dblp bib conf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{ FLinAI2015FuzzyDT,\n  author    = {Stefan Schiffer and Alexander Ferrein},\n  title     = {Decision-Theoretic Planning with Linguistic Terms in Golog},\n  booktitle = {Proceedings of the Workshop on Fuzzy Logic in AI (FLinAI) 2015,\n               co-located with the 24th International Joint Conference on Artificial Intelligence ({IJCAI} 2015)},\n  editor    = {Irene D{\\'{\\i}}az and Anca Ralescu and Stefan Schiffer},\n  series    = {{CEUR} Workshop Proceedings},\n  volume    = {1424},\n  location  = {Buenos Aires, Argentina},\n  month     = {July 25},\n  year         = {2015},\n  url_PDF      = {http://ceur-ws.org/Vol-1424/Paper1.pdf},\n  url_Conf     = {http://ceur-ws.org/Vol-1424},\n  url_DBLP_bib = {http://dblp.uni-trier.de/rec/bib/conf/ijcai/0002F15},\n  url_DBLP_bib_Conf = {http://dblp.uni-trier.de/rec/bib/conf/ijcai/2015flinai},\n  abstract     = {In this paper we propose an extension of the action\n                  language GOLOG that integrates linguistic terms in\n                  non-deterministic argument choices and the reward\n                  function for decision-theoretic planning. It is\n                  often cumbersome to specify the set of values to\n                  pick from in the\n                  non-deterministic-choice-of-argument statement.\n                  Also, specifying a reward function is not always\n                  easy, even for domain experts. Instead of providing\n                  a finite domain for values in the\n                  non-deterministic-choice-of-argument statement in\n                  GOLOG, we now allow for stating the argument domain\n                  by simply providing a formula over linguistic terms\n                  and fuzzy fluents. In GOLOG’s forward-search DT\n                  planning algorithm, these formulas are evaluated in\n                  order to find the agent’s optimal policy. We\n                  illustrate this in the Diner Domain where the agent\n                  needs to calculate the optimal serving order.},\n}\n
\n
\n\n\n
\n In this paper we propose an extension of the action language GOLOG that integrates linguistic terms in non-deterministic argument choices and the reward function for decision-theoretic planning. It is often cumbersome to specify the set of values to pick from in the non-deterministic-choice-of-argument statement. Also, specifying a reward function is not always easy, even for domain experts. Instead of providing a finite domain for values in the non-deterministic-choice-of-argument statement in GOLOG, we now allow for stating the argument domain by simply providing a formula over linguistic terms and fuzzy fluents. In GOLOG’s forward-search DT planning algorithm, these formulas are evaluated in order to find the agent’s optimal policy. We illustrate this in the Diner Domain where the agent needs to calculate the optimal serving order.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Proceedings of the Workshop on Fuzzy Logic in AI, FLinAI 2015, co-located with the 24th International Joint Conference on Artificial Intelligence (IJCAI 2015).\n \n \n \n \n\n\n \n Díaz, I.; Ralescu, A.; and Schiffer, S.,\n editors.\n \n\n\n \n\n\n\n Volume 1424, of CEUR Workshop Proceedings. CEUR-WS.org. July 25 2015.\n \n\n\n\n
\n\n\n\n \n \n \"ProceedingsPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@proceedings{ FLinAI2015Proc,\n  editor    = {Irene D{\'{\i}}az and Anca Ralescu and Stefan Schiffer},\n  title     = {Proceedings of the Workshop on Fuzzy Logic in AI, FLinAI 2015,\n               co-located with the 24th International Joint Conference on Artificial Intelligence ({IJCAI} 2015)},\n  month     = {July 25},\n  year         = {2015},\n  location  = {Buenos Aires, Argentina},\n  series    = {{CEUR} Workshop Proceedings},\n  volume    = {1424},\n  publisher = {CEUR-WS.org},\n  url       = {http://ceur-ws.org/Vol-1424},\n  biburl    = {http://dblp.uni-trier.de/rec/bib/conf/ijcai/2015flinai},\n  bibsource = {dblp computer science bibliography, http://dblp.org},\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Integrating Qualitative Reasoning and Human-Robot Interaction for Domestic Service Robots.\n \n \n \n \n\n\n \n Schiffer, S.\n\n\n \n\n\n\n Ph.D. Thesis, RWTH Aachen University, Department of Computer Science, Feb 2015.\n \n\n\n\n
\n\n\n\n \n \n \"IntegratingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@PhdThesis{Schiffer2015Dissertation,\n  author       = {Schiffer, Stefan},\n  title        = {Integrating Qualitative Reasoning and Human-Robot Interaction for Domestic Service Robots},\n  school       = {RWTH Aachen University, Department of Computer Science},\n  institution  = {Knowledge-Based Systems Group},\n  OPTtype         = {Dr.},\n  type         = {Dissertation},\n  year         = {2015},\n  month        = {Feb},\n  supervisor   = {Prof. Gerhard Lakemeyer, PhD},\n  cosupervisor = {Prof. Dr. Maren Bennewitz},\n  defensedate  = {2014-12-17},\n  pdfurl       = {https://kbsg.rwth-aachen.de/~schiffer/bib/schiffer2015diss.pdf},\n  pages        = {224},\n  publisher    = {Publikationsserver der RWTH Aachen University},\n  cin          = {121920},\n  cid          = {$I:(DE-82)121920_20140620$},\n  typ          = {PUB:(DE-HGF)11},\n  urn          = {urn:nbn:de:hbz:82-rwth-2015-006607},\n  url          = {https://publications.rwth-aachen.de/record/462448},\n  keywords     = {Qualitative Reasoning, Human-Robot Interaction, Domestic Service Robots, RoboCup@Home},\n  abstract     = {The last decade has seen an increasing interest in\n                  domestic service robots. Particular challenges for\n                  such robots especially when performing complex tasks\n                  are deliberation, robust execution of actions, and\n                  flexible human-robot interaction. Despite progress\n                  in qualitative reasoning and human-robot interaction\n                  their integration is an open issue.\n                  In this thesis, we build on an existing cognitive\n                  mobile robot platform and make a series of\n                  contributions to integrate qualitative\n                  representations, high-level reasoning and\n                  human-robot interaction for an intelligent domestic\n                  service robot. We start by introducing the domestic\n                  service robotics domain and parts of the\n                  RoboCup@Home methodology that we contributed\n                  to. Before we can actually turn to our main focus,\n                  we equip the system with a set of basic capabilities\n                  that are required for a service robot in human\n                  environments. As a bridge between perception and\n                  symbolic reasoning we provide a semantic mapping\n                  scheme that allows to centrally manage information\n                  about the environment. With a novel hierarchical\n                  object recognition method we are further able to\n                  classify even yet unseen objects.\n                  Then we move on to the main contributions of this\n                  thesis. First, we extend the robot with important\n                  modes for human-robot interaction by adding\n                  components for speech, face, and gesture recognition\n                  as well as for speech synthesis and a virtual facial\n                  display. For the speech input we proceed with a\n                  simple form of natural language understanding that\n                  allows a limited form of error recovery. Second, we\n                  introduce qualitative representations and control to\n                  our high-level control system.  
After integrating a\n                  general account for qualitative information based on\n                  fuzzy sets into our high-level language we also add\n                  means to specify and use fuzzy controllers for\n                  behaviour specification. Then we focus on spatial\n                  data and provide a formalization that allows for\n                  representing and reasoning with qualitative\n                  positional information in our high-level language.\n                  Lastly, we increase the robustness of the robot\n                  against internal errors and add to the flexibility\n                  in dealing with possibly faulty external input. We\n                  integrate a basic form of self-maintenance that\n                  allows the robot to recover from internal errors by\n                  itself.},\n  keywords-de  = {Qualitatives Schließen, Mensch-Roboter Interaktion, Serviceroboter, Haushaltsroboter},\n  abstract-de  = {Innerhalb des letzten Jahrzehnts ist das Interesse\n                  an Servicerobotern fuer Haushaltsumgebungen stetig\n                  gestiegen. Einige der Herausforderungen fuer solche\n                  Roboter, insbesondere bei der Bewaeltigung von\n                  komplexen Aufgaben, bestehen in der Deliberation,\n                  der robusten Ausfuehrung von Aktionen und der\n                  flexiblen Mensch-Roboter-Interaktion. Ungeachtet der\n                  Fortschritte auf den Gebieten des qualitativen\n                  Schliessens und in der Mensch-Roboter-Interaktion\n                  ist die geeignete Integration dieser beiden Aspekte\n                  eine noch offene Fragestellung.\n                  Aufbauend auf einem existierenden Robotersystem\n                  liefert diese Dissertation eine Reihe von Beitraegen\n                  zur Integration von qualitativen Repraesentationen,\n                  high-level Reasoning und Mensch-Roboter-Interaktion\n                  fuer einen intelligenten Haushaltsroboter. Wir\n                  beginnen mit einer Vorstellung der Haushaltsrobotik\n                  und von Teilen der Methodologie von RoboCup@Home, an\n                  der wir mitgewirkt haben. Bevor wir uns den\n                  eigentlichen Beitraegen dieser Arbeit widmen\n                  koennen, muessen wir zunaechst das Robotersystem mit\n                  einigen Basisfertigkeiten ausstatten, die jeder\n                  Serviceroboter mitbringen muss, der in menschlichen\n                  Umgebungen arbeiten soll. Als Bindeglied zwischen\n                  sensorischer Wahrnehmung und symbolischem Schliessen\n                  stellen wir eine Methode zur semantischen\n                  Kartografie vor, die die zentrale Verwaltung von\n                  Informationen ueber die Umgebung des Roboters\n                  ermoeglicht. Dank eines neuartigen hierarchischen\n                  Verfahrens zur Objekterkennung sind wir in der Lage\n                  sogar zuvor unbekannte Objekte zu erkennen.\n                  Dann widmen wir uns den Hauptbeitraegen dieser\n                  Dissertation. Zuerst erweitern wir unseren Roboter\n                  mit wichtigen Faehigkeiten zur\n                  Mensch-Roboter-Interaktion, indem wir Komponenten\n                  zur Sprach-, Gesichts- und Gestenerkennung sowie zur\n                  Sprachsynthese und zur Darstellung eines\n                  kuenstlichen Gesichts integrieren. 
Auf die\n                  Spracherkennung folgt eine einfache Form des\n                  Sprachverstehens, welche eine begrenzte\n                  Fehlerkorrektur erlaubt. Dann erweitern wir unser\n                  high-level Kontrollsystem um Moeglichkeiten fuer\n                  qualitative Repraesentationen und zur qualitativen\n                  Kontrolle. Nachdem wir diese beiden Aspekte\n                  zunächst generisch behandeln, stellen wir eine\n                  spezielle Erweiterung zur Repräsentation von und\n                  zum Schliessen mit qualitativen raeumlichen\n                  Informationen vor. Schliesslich erhoehen wir die\n                  Robustheit unseres Systems gegenueber internen\n                  Fehlern durch die Einfuehrung einer\n                  Selbstwartungsfunktion. Diese versetzt den Roboter\n                  in die Lage, gewisse Fehler selbstaendig zu beheben.},\n}
\n
\n\n\n
\n The last decade has seen an increasing interest in domestic service robots. Particular challenges for such robots, especially when performing complex tasks, are deliberation, robust execution of actions, and flexible human-robot interaction. Despite progress in qualitative reasoning and human-robot interaction, their integration is an open issue. In this thesis, we build on an existing cognitive mobile robot platform and make a series of contributions to integrate qualitative representations, high-level reasoning and human-robot interaction for an intelligent domestic service robot. We start by introducing the domestic service robotics domain and parts of the RoboCup@Home methodology that we contributed to. Before we can actually turn to our main focus, we equip the system with a set of basic capabilities that are required for a service robot in human environments. As a bridge between perception and symbolic reasoning we provide a semantic mapping scheme that allows us to centrally manage information about the environment. With a novel hierarchical object recognition method we are further able to classify even previously unseen objects. Then we move on to the main contributions of this thesis. First, we extend the robot with important modes for human-robot interaction by adding components for speech, face, and gesture recognition as well as for speech synthesis and a virtual facial display. For the speech input we proceed with a simple form of natural language understanding that allows a limited form of error recovery. Second, we introduce qualitative representations and control to our high-level control system. After integrating a general account for qualitative information based on fuzzy sets into our high-level language we also add means to specify and use fuzzy controllers for behaviour specification. Then we focus on spatial data and provide a formalization that allows for representing and reasoning with qualitative positional information in our high-level language. Lastly, we increase the robustness of the robot against internal errors and add to the flexibility in dealing with possibly faulty external input. We integrate a basic form of self-maintenance that allows the robot to recover from internal errors by itself.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2013\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Natural Language Processing in Domestic Service Robotics.\n \n \n \n\n\n \n Schiffer, S.\n\n\n \n\n\n\n In Neumann, S.; Niehr, T.; Runkehl, J.; Niemietz, P.; and Fest, J., editor(s), LingUnite – Tag der Sprachforschung, Aachen, Germany, Oct 11 2013. RWTH Aachen University\n Best Poster Award\n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{ Schiffer_LingUnite2013_NLPinDSR,\n  title       = {Natural Language Processing in Domestic Service Robotics},\n  author      = {Stefan Schiffer},\n  affiliation = {Knowledge-Based Systems Group, RWTH Aachen University},\n  booktitle   = {LingUnite -- Tag der Sprachforschung},\n  editor      = {Stella Neumann and Thomas Niehr and Jens Runkehl and Paula Niemietz and Jennifer Fest},\n  location    = {RWTH Aachen University},\n  month       = {Oct 11},\n  year        = {2013},\n  publisher   = {RWTH Aachen University},\n  address     = {Aachen, Germany},\n  OPTpages       = {},\n  OPTnote        = {poster presentation},\n  note        = {Best Poster Award},\n  abstract    = {As robots are more and more entering our everyday\n                 life, such as assistive devices to support us in our\n                 home with the daily chores, methods to control and to\n                 interact with such robots become more and more\n                 important. The most natural way for a human to\n                 instruct a robot is perhaps natural language.\n                 However, there are several challenges to master to\n                 allow for suitable human-robot interaction by means of\n                 natural language. We will report on two of our\n                 efforts in enabling humans to use natural language to\n                 command a domestic service robot. The two methods we\n                 present reside on different levels - one is at a\n                 lower level of recognizing speech from acoustic input\n                 while the second one is about interpreting natural\n                 language. While the former was primarily intended for\n                 noisy scenarios to help reject utterances that\n                 were not meant for the robot, the latter yields a\n                 flexible system for commanding a robot which can\n                 resolve ambiguities and which is also capable of\n                 initiating steps to achieve clarification.\n                 The first approach [1] is at the signal processing\n                 stage where the acoustic input received from spoken\n                 language is to be converted to the textual level.\n                 When acting in human environments it is important\n                 that commands given to the robot are recognized\n                 robustly. Also, spoken language not directed to the\n                 robot must not be matched to an instruction for the\n                 robot to execute.  We developed a system that is\n                 robust in noisy environments and that does not\n                 act upon commands not meant for the\n                 robot. First, we use a threshold-based close speech\n                 detection to segment utterances targeted at the robot\n                 from the continuous audio stream recorded by a\n                 microphone. Then, we decode these utterances with two\n                 different decoders in parallel, namely one very\n                 restrictive decoder based on finite state grammars\n                 and a second more lenient decoder using N-grams. 
We\n                 do this to filter out false positive recognitions by\n                 comparing the output of the two decoders and\n                 rejecting the input if it was not recognized by both\n                 decoders.\n                 The second approach [2] takes place on a higher level\n                 of abstraction, that is, it deals with interpreting\n                 an utterance that has already been transformed to\n                 text from the raw audio signal. We model the\n                 processing of natural spoken language input as an\n                 interpretation process where the utterance needs to\n                 be mapped to a robot's capabilities. More precisely,\n                 we first analyse the given utterance syntactically by\n                 using a generic grammar that we developed for English\n                 directives. Then, we cast the interpretation as a\n                 planning problem where the individual actions\n                 available to the planner are to interpret syntactical\n                 elements of the utterance. If, in the course of\n                 interpreting, ambiguities are detected, the system\n                 uses decision theory to weigh different alternatives.\n                 The system is also able to initiate clarification to\n                 resolve ambiguities and to handle errors so as to arrive\n                 at a successful command interpretation eventually. We\n                 show how we evaluated several versions of the system\n                 with multiple utterances of different complexity as\n                 well as with incomplete and erroneous requests.},\n}\n
\n
\n\n\n
\n As robots are more and more entering our everyday life, such as assistive devices to support us in our home with the daily chores, methods to control and to interact with such robots become more and more important. The most natural way for a human to instruct a robot is perhaps natural language. However, there are several challenges to master to allow for suitable human-robot interaction by means of natural language. We will report on two of our efforts in enabling humans to use natural language to command a domestic service robot. The two methods we present reside on different levels - one is at a lower level of recognizing speech from acoustic input while the second one is about interpreting natural language. While the former was primarily intended for noisy scenarios to help reject utterances that were not meant for the robot, the latter yields a flexible system for commanding a robot which can resolve ambiguities and which is also capable of initiating steps to achieve clarification. The first approach [1] is at the signal processing stage where the acoustic input received from spoken language is to be converted to the textual level. When acting in human environments it is important that commands given to the robot are recognized robustly. Also, spoken language not directed to the robot must not be matched to an instruction for the robot to execute. We developed a system that is robust in noisy environments and that does not act upon commands not meant for the robot. First, we use a threshold-based close speech detection to segment utterances targeted at the robot from the continuous audio stream recorded by a microphone. Then, we decode these utterances with two different decoders in parallel, namely one very restrictive decoder based on finite state grammars and a second more lenient decoder using N-grams. We do this to filter out false positive recognitions by comparing the output of the two decoders and rejecting the input if it was not recognized by both decoders. The second approach [2] takes place on a higher level of abstraction, that is, it deals with interpreting an utterance that has already been transformed to text from the raw audio signal. We model the processing of natural spoken language input as an interpretation process where the utterance needs to be mapped to a robot's capabilities. More precisely, we first analyse the given utterance syntactically by using a generic grammar that we developed for English directives. Then, we cast the interpretation as a planning problem where the individual actions available to the planner are to interpret syntactical elements of the utterance. If, in the course of interpreting, ambiguities are detected, the system uses decision theory to weigh different alternatives. The system is also able to initiate clarification to resolve ambiguities and to handle errors so as to arrive at a successful command interpretation eventually. We show how we evaluated several versions of the system with multiple utterances of different complexity as well as with incomplete and erroneous requests.\n
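The dual-decoder rejection step described in this abstract can be sketched in a few lines. Both decoders are stubs below; the function names and the agreement test are assumptions for illustration, not the system from the paper:

# Sketch of the dual-decoder false-positive filter described above:
# accept an utterance only if a strict grammar-based decoder and a
# more lenient N-gram decoder agree. Both decoders are stubs.

def fsg_decode(audio):
    """Placeholder for the restrictive finite-state-grammar decoder."""
    return "bring me the cup"        # hypothetical output

def ngram_decode(audio):
    """Placeholder for the lenient N-gram decoder."""
    return "bring me the cup"        # hypothetical output

def accept_utterance(audio):
    """Accept only if both decoders produce the same hypothesis."""
    strict, lenient = fsg_decode(audio), ngram_decode(audio)
    return strict if strict == lenient else None

command = accept_utterance(b"...raw samples...")
print(command or "rejected: decoders disagree")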
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Lessons Learnt from Developing the Embodied AI Platform Caesar for Domestic Service Robotics.\n \n \n \n\n\n \n Ferrein, A.; Niemueller, T.; Schiffer, S.; and Lakemeyer, G.\n\n\n \n\n\n\n In Papers from the 2013 AAAI Spring Symposium on Designing Intelligent Robots: Reintegrating AI II, March 25-27 2013. AAAI\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{AAAI-SS2013-DIR2-LessonsLearnt,\n  author       = {Alexander Ferrein and Tim Niemueller and Stefan Schiffer and Gerhard Lakemeyer},\n  title        = {{Lessons Learnt from Developing the Embodied AI Platform Caesar for Domestic Service Robotics}},\n  booktitle    = {Papers from the 2013 AAAI Spring Symposium on Designing Intelligent Robots: Reintegrating AI II},\n  editor       = {Byron Boots and Nick Hawes and Todd Hester and George Konidaris and Bhaskara Marthi and Lorenzo Riano and Benjamin Rosman},\n  location     = {Stanford University, CA, USA},\n  year         = {2013},\n  month        = {March 25-27},\n  organization = {AAAI},\n  number       = {ss-13-04},\n  isbn         = {978-1-57735-601-1},\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Natural Language Interpretation for an Interactive Service Robot in Domestic Domains.\n \n \n \n\n\n \n Schiffer, S.; Hoppe, N.; and Lakemeyer, G.\n\n\n \n\n\n\n In Filipe, J.; and Fred, A., editor(s), Agents and Artificial Intelligence, volume 358, of Communications in Computer and Information Science, pages 39–53. Springer Berlin Heidelberg, 2013.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InCollection{Schiffer:Hoppe:Lakemeyer:ICAART2013:FlexibleCommand,\n  author     = {Stefan Schiffer and Niklas Hoppe and Gerhard Lakemeyer},\n  title      = {Natural Language Interpretation for an Interactive Service Robot in Domestic Domains},\n  year       = {2013},\n  pages      = {39--53},\n  editor     = {Joaquim Filipe and Ana Fred},\n  booktitle  = {Agents and Artificial Intelligence},\n  OPTbooktitle = {International Conference on Agents and Artificial Intelligence (ICAART 2012) Revised Selected Papers},\n  series     = {Communications in Computer and Information Science},\n  volume     = {358},\n  publisher  = {Springer Berlin Heidelberg},\n  doi        = {10.1007/978-3-642-36907-0_3},\n  isbn       = {978-3-642-36906-3},\n  abstract   = {In this paper, we propose a flexible system for robust\n                natural language interpretation of spoken commands on\n                a mobile robot in domestic service robotics\n                applications.  Existing language processing for\n                instructing a mobile robot is often restricted by\n                using a simple grammar where precisely pre-defined\n                utterances are directly mapped to system calls. These\n                approaches do not regard fallibility of human users\n                and they only allow for binary processing of an\n                utterance; either a command is part of the grammar and\n                hence understood correctly, or it is not part of the\n                grammar and gets rejected.  We model the language\n                processing as an interpretation process where the\n                utterance needs to be mapped to a robot's\n                capabilities. We do so by casting the processing as a\n                (decision-theoretic) planning problem on\n                interpretatory actions. This allows for a flexible\n                system that can resolve ambiguities and which is also\n                capable of initiating steps to achieve clarification.\n                We show how we evaluated several versions of the\n                system with multiple utterances of different\n                complexity as well as with incomplete and erroneous\n                requests.},\n}\n
\n
\n\n\n
\n In this paper, we propose a flexible system for robust natural language interpretation of spoken commands on a mobile robot in domestic service robotics applications. Existing language processing for instructing a mobile robot is often restricted by using a simple grammar where precisely pre-defined utterances are directly mapped to system calls. These approaches do not regard fallibility of human users and they only allow for binary processing of an utterance; either a command is part of the grammar and hence understood correctly, or it is not part of the grammar and gets rejected. We model the language processing as an interpretation process where the utterance needs to be mapped to a robot's capabilities. We do so by casting the processing as a (decision-theoretic) planning problem on interpretatory actions. This allows for a flexible system that can resolve ambiguities and which is also capable of initiating steps to achieve clarification. We show how we evaluated several versions of the system with multiple utterances of different complexity as well as with incomplete and erroneous requests.\n
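A hedged sketch of the central idea from this abstract, casting command interpretation as a (decision-theoretic) choice among candidate readings. The capability table, scoring, and clarification fallback are invented placeholders, not the paper's planner:

# Sketch of "interpretation as (decision-theoretic) planning": each
# interpretatory reading maps part of an utterance to a capability
# and carries a score; we pick the best-scoring interpretation.

CAPABILITIES = {"bring": "fetch_and_carry", "go": "navigate"}  # assumptions

def interpretations(tokens):
    """Enumerate candidate (capability, argument, score) readings."""
    for i, tok in enumerate(tokens):
        if tok in CAPABILITIES:
            arg = " ".join(tokens[i + 1:])
            score = 1.0 if arg else 0.2   # prefer complete commands
            yield CAPABILITIES[tok], arg, score

def interpret(utterance):
    cands = list(interpretations(utterance.lower().split()))
    if not cands:
        return ("clarify", utterance, 0.0)  # trigger clarification instead
    return max(cands, key=lambda c: c[2])

print(interpret("Please bring the cup"))   # ('fetch_and_carry', 'the cup', 1.0)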
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Life-long Learning Perception using Cloud Database Technology.\n \n \n \n \n\n\n \n Niemueller, T.; Schiffer, S.; Lakemeyer, G.; and Rezapour-Lakani, S.\n\n\n \n\n\n\n In Waibel, M.; Goldberg, K.; Civera, J.; Aydemir, A.; Ciocarlie, M.; and Gajamohan, M., editor(s), Cloud Robotics Workshop at IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), Nov 3 2013. \n \n\n\n\n
\n\n\n\n \n \n \"Life-longPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{ Niemueller:Schiffer:EtAl:CRWS-IROS2013:LifeLongLearningPerception,\n  title        = {Life-long Learning Perception using Cloud Database Technology},\n  author       = {Tim Niemueller and Stefan Schiffer and \n                  Gerhard Lakemeyer and Safoura Rezapour-Lakani},\n  affiliation  = {Knowledge-Based Systems Group, RWTH Aachen University},\n  booktitle    = {Cloud Robotics Workshop at IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},\n  editor       = {Markus Waibel and Ken Goldberg and Javier Civera and\n                  Alper Aydemir and Matei Ciocarlie and Mohanarajah Gajamohan},\n  location     = {Tokyo, Japan},\n  month        = {Nov 3},\n  year         = {2013},\n  abstract     = {Autonomous mobile robots in household environments have\n                  to cope with many different kinds of objects which\n                  they must detect, recognize, and manipulate. Over\n                  their lifetime, the robots must adapt to new objects\n                  and incorporate new perception methods. In this\n                  paper we present a system for life-long learning of\n                  training data and perception method parameters using\n                  a document-oriented, schema-less database technology\n                  that is typically used in cloud computing\n                  applications. Not only can a single robot extend and\n                  increase its data volume continuously over time, but\n                  it can also potentially share this very dataset with\n                  multiple other robots through the cloud.},\n  keywords     = {Cloud Robotics, Robot Database, Robotic Perception},\n  url          = {http://www.roboearth.org/iros2013},\n  attachments  = {https://kbsg.rwth-aachen.de/sites/kbsg/files/longterm-perception-db-iros2013.pdf},\n}\n
\n
\n\n\n
\n Autonomous mobile robots in household environments have to cope with many different kinds of objects which they must detect, recognize, and manipulate. Over their lifetime, the robots must adapt to new objects and incorporate new perception methods. In this paper we present a system for life-long learning of training data and perception method parameters using a document-oriented, schema-less database technology that is typically used in cloud computing applications. Not only can a single robot extend and increase its data volume continuously over time, but it can also potentially share this very dataset with multiple other robots through the cloud.\n
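A minimal sketch of the storage idea from this abstract, with a plain in-memory list standing in for the document-oriented cloud database; all field names here are assumptions, not the authors' schema:

# Life-long learning storage in a schema-less, document-oriented style.
# A plain list stands in for the shared document collection.

import time

documents = []   # stand-in for a cloud-hosted document collection

def store_training_sample(robot_id, method, params, features):
    """Append one perception training document; no fixed schema needed."""
    documents.append({
        "robot": robot_id,
        "method": method,          # e.g. a recognition pipeline name
        "params": params,          # tuned parameters for that method
        "features": features,      # extracted training features
        "stamp": time.time(),
    })

def latest_params(method):
    """Fetch the newest parameter set for a method, shareable across robots."""
    hits = [d for d in documents if d["method"] == method]
    return max(hits, key=lambda d: d["stamp"])["params"] if hits else None

store_training_sample("caesar", "plane_segmentation", {"max_iter": 50}, [0.1, 0.7])
print(latest_params("plane_segmentation"))  # {'max_iter': 50}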
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2012\n \n \n (8)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Caesar – An Intelligent Domestic Service Robot.\n \n \n \n \n\n\n \n Schiffer, S.; Ferrein, A.; and Lakemeyer, G.\n\n\n \n\n\n\n Journal of Intelligent Service Robotics, 23(Special Issue on Artificial Intelligence in Robotics: Sensing, Representation and Action): 259–273. 2012.\n \n\n\n\n
\n\n\n\n \n \n \"\\textscCaesarPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{ Schiffer:Ferrein:Lakemeyer:JISR:2012:Caesar,\n  author      = {Schiffer, Stefan and Ferrein, Alexander and Lakemeyer, Gerhard},\n  affiliation = {Knowledge-based Systems Group, RWTH Aachen University, Aachen, Germany},\n  title       = {{\\textsc{Caesar} -- An Intelligent Domestic Service Robot}},\n  journal     = {Journal of Intelligent Service Robotics},\n  year        = {2012},\n  OPTmonth       = {},\n  volume      = {23},\n  number      = {Special Issue on Artificial Intelligence in Robotics: Sensing, Representation and Action},\n  editor      = {Fulvio Mastrogiovanni and Nak Young Chong},\n  pages       = {259--273},\n  numpages    = {24},\n  publisher   = {Springer},\n  OPTissn        = {},\n  OPTisbn        = {},\n  keyword     = {Engineering},\n  OPTurl      = {},\n  abstract    = {In this paper we present Caesar, an intelligent\n                 domestic service robot.\n                 In domestic settings for service robots complex tasks\n                 have to be accomplished. Those tasks benefit from\n                 deliberation, from robust action execution and from\n                 flexible methods for human-robot interaction that\n                 account for qualitative notions used in natural\n                 language as well as human fallibility.\n                 Our robot Caesar deploys AI techniques on several\n                 levels of its system architecture.  On the low-level\n                 side, system modules for localization or navigation\n                 make, for instance, use of path planning methods,\n                 heuristic search, and Bayesian filters. For face\n                 recognition and human-machine interaction, random\n                 trees and well-known methods from natural language\n                 processing are deployed.\n                 For deliberation, we use the robot programming and\n                 plan language Readylog, which was developed for the\n                 high-level control of agents and robots; it allows one to\n                 combine programming the behaviour with using planning\n                 to find a course of action. Readylog is a variant\n                 of the robot programming language Golog. We\n                 extended Readylog to be able to cope with qualitative\n                 notions of space frequently used by humans such as\n                 ``near'' and ``far''. This facilitates human-robot\n                 interaction by bridging the gap between human natural\n                 language and the numerical values needed by the\n                 robot.\n                 Further, we use Readylog to increase the flexible\n                 interpretation of human commands with\n                 decision-theoretic planning. We give an overview of\n                 the different methods deployed in Caesar and show the\n                 applicability of a system equipped with these AI\n                 techniques in domestic service robotics.},\n}\n
\n
\n\n\n
\n In this paper we present Caesar, an intelligent domestic service robot. In domestic settings for service robots complex tasks have to be accomplished. Those tasks benefit from deliberation, from robust action execution and from flexible methods for human-robot interaction that account for qualitative notions used in natural language as well as human fallibility. Our robot Caesar deploys AI techniques on several levels of its system architecture. On the low-level side, system modules for localization or navigation make, for instance, use of path planning methods, heuristic search, and Bayesian filters. For face recognition and human-machine interaction, random trees and well-known methods from natural language processing are deployed. For deliberation, we use the robot programming and plan language Readylog, which was developed for the high-level control of agents and robots; it allows one to combine programming the behaviour with using planning to find a course of action. Readylog is a variant of the robot programming language Golog. We extended Readylog to be able to cope with qualitative notions of space frequently used by humans such as ``near'' and ``far''. This facilitates human-robot interaction by bridging the gap between human natural language and the numerical values needed by the robot. Further, we use Readylog to increase the flexible interpretation of human commands with decision-theoretic planning. We give an overview of the different methods deployed in Caesar and show the applicability of a system equipped with these AI techniques in domestic service robotics.\n
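Among the low-level techniques this abstract lists, Bayesian filtering is the easiest to illustrate compactly. A toy discrete 1-D Bayes filter follows; the world, motion, and sensor models are invented for the sketch and are not Caesar's localization code:

# Toy discrete Bayes filter: predict (motion) and correct (sensing)
# over a small circular grid world.

def normalize(p):
    s = sum(p)
    return [x / s for x in p]

def predict(belief, shift):
    """Motion update: shift the belief along the corridor (circular world)."""
    n = len(belief)
    return [belief[(i - shift) % n] for i in range(n)]

def correct(belief, world, measurement, p_hit=0.8, p_miss=0.2):
    """Sensor update: weight cells whose label matches the measurement."""
    weighted = [b * (p_hit if w == measurement else p_miss)
                for b, w in zip(belief, world)]
    return normalize(weighted)

world = ["door", "wall", "wall", "door", "wall"]
belief = [0.2] * 5                       # uniform prior
belief = correct(belief, world, "door")  # robot senses a door
belief = predict(belief, 1)              # robot moves one cell right
print([round(b, 2) for b in belief])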
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Plan Recognition by Program Execution in Continuous Temporal Domains.\n \n \n \n\n\n \n Schwering, C.; Beck, D.; Schiffer, S.; and Lakemeyer, G.\n\n\n \n\n\n\n In Proceedings of the 35th German Conference on Artificial Intelligence (KI'2012), September 24–27 2012. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{ Schwering:Beck:Schiffer:Lakemeyer:KI2012:PlaRaPeX,\n  author      = {Christoph Schwering and Daniel Beck and Stefan Schiffer and Gerhard Lakemeyer},\n  title       = {Plan Recognition by Program Execution in Continuous Temporal Domains},\n  booktitle   = {Proceedings of the 35th German Conference on Artificial Intelligence (KI'2012)},\n  year        = {2012},\n  month       = {September 24--27},\n  location    = {Saarbr{\\"u}cken, Germany},\n  OPTeditor      = {},\n  OPTnumber      = {},\n  OPTseries      = {},\n  OPTISSN        = {},\n  OPTpublisher   = {},\n  OPTaddress     = {},\n  OPTurl         = {},\n  abstract    = {Much of the existing work on plan recognition assumes\n                 that actions of other agents can be observed\n                 directly. In continuous temporal domains such as\n                 traffic scenarios this assumption is typically not\n                 warranted. Instead, one is only able to observe facts\n                 about the world such as vehicle positions at\n                 different points in time, from which the agents'\n                 plans need to be inferred. In this paper we show how\n                 this problem can be addressed in the situation\n                 calculus and a new variant of the action programming\n                 language Golog, which includes features such as\n                 continuous time and change, stochastic actions,\n                 nondeterminism, and concurrency.\n                 In our approach we match observations against a set\n                 of candidate plans in the form of Golog programs. We\n                 turn the observations into actions which are then\n                 executed concurrently with the given programs. Using\n                 decision-theoretic optimization techniques those\n                 programs are preferred which bring about the\n                 observations at the appropriate times. Besides\n                 defining this new variant of Golog we also discuss an\n                 implementation and experimental results using driving\n                 maneuvers as an example.},\n}\n
\n
\n\n\n
\n Much of the existing work on plan recognition assumes that actions of other agents can be observed directly. In continuous temporal domains such as traffic scenarios this assumption is typically not warranted. Instead, one is only able to observe facts about the world such as vehicle positions at different points in time, from which the agents' plans need to be inferred. In this paper we show how this problem can be addressed in the situation calculus and a new variant of the action programming language Golog, which includes features such as continuous time and change, stochastic actions, nondeterminism, and concurrency. In our approach we match observations against a set of candidate plans in the form of Golog programs. We turn the observations into actions which are then executed concurrently with the given programs. Using decision-theoretic optimization techniques those programs are preferred which bring about the observations at the appropriate times. Besides defining this new variant of Golog we also discuss an implementation and experimental results using driving maneuvers as an example.\n
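The matching idea can be sketched with candidate plans reduced to plain trace functions standing in for the paper's Golog programs; the observations, tolerance, and scoring below are illustrative assumptions:

# Sketch of plan recognition by program execution: each candidate plan
# is "executed" against timed observations, and the plan whose
# simulated trace best reproduces them is preferred.

def overtake(t):      # hypothetical lateral position over time
    return 0.0 if t < 2 else 3.5

def follow(t):
    return 0.0

CANDIDATES = {"overtake": overtake, "follow": follow}

def score(plan, observations, tol=0.5):
    """Count timed observations the simulated plan trace reproduces."""
    return sum(abs(plan(t) - y) <= tol for t, y in observations)

def recognize(observations):
    return max(CANDIDATES, key=lambda name: score(CANDIDATES[name], observations))

obs = [(0, 0.0), (1, 0.1), (3, 3.4), (4, 3.6)]   # observed vehicle positions
print(recognize(obs))    # -> "overtake"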
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n robOCD: Robotic Order Cups Demo – An Interactive Domestic Service Robotics Demo.\n \n \n \n\n\n \n Schiffer, S.; Baumgartner, T.; Beck, D.; Maleki-Fard, B.; Niemueller, T.; Schwering, C.; and Lakemeyer, G.\n\n\n \n\n\n\n In Poster and Demo Session at the 35th German Conference on Artificial Intelligence (KI 2012), September 24–27 2012. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{ Schiffer:EtAl:KI2012:robOCD,\n  author      = {Stefan Schiffer and Tobias Baumgartner and Daniel Beck and Bahram Maleki-Fard and Tim Niemueller and Christoph Schwering and Gerhard Lakemeyer},\n  title       = {{robOCD}: {R}obotic {O}rder {C}ups {D}emo -- An Interactive Domestic Service Robotics Demo},\n  booktitle   = {Poster and Demo Session at the 35th German Conference on Artificial Intelligence (KI 2012)},\n  year        = {2012},\n  month       = {September 24--27},\n  location    = {Saarbr{\\"u}cken, Germany},\n  OPTeditor      = {},\n  OPTnumber      = {},\n  OPTseries      = {},\n  OPTISSN        = {},\n  OPTpublisher   = {},\n  OPTaddress     = {},\n  OPTurl         = {},\n  abstract    = {This paper describes an interactive demonstration by\n                 the AllemaniACs' domestic service robot Caesar. In a\n                 home-like environment Caesar's task is to help\n                 set the table.  Besides basic capabilities of an\n                 autonomous mobile robot such as localization and\n                 collision free navigation it uses methods for\n                 human-robot interaction and it also has a\n                 sophisticated high-level control that allows for\n                 decision-theoretic planning.  We use this demo to\n                 illustrate the interplay of several modules of our\n                 robot control software in carrying out complex\n                 tasks. The overall system allows for robust,\n                 reliable service robotics in domestic settings such as\n                 the RoboCup@Home league.  Also, we show how our\n                 high-level programming language provides a powerful\n                 framework for agent behavior specification that can\n                 be beneficially deployed for service robotic\n                 applications.},\n}\n
\n
\n\n\n
\n This paper describes an interactive demonstration by the AllemaniACs' domestic service robot Caesar. In a home-like environment Caesar's task is to help set the table. Besides basic capabilities of an autonomous mobile robot such as localization and collision free navigation it uses methods for human-robot interaction and it also has a sophisticated high-level control that allows for decision-theoretic planning. We use this demo to illustrate the interplay of several modules of our robot control software in carrying out complex tasks. The overall system allows for robust, reliable service robotics in domestic settings such as the RoboCup@Home league. Also, we show how our high-level programming language provides a powerful framework for agent behavior specification that can be beneficially deployed for service robotic applications.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n BendIT – An Interactive Game with two Robots.\n \n \n \n\n\n \n Niemueller, T.; Schiffer, S.; Helligrath, A.; Lakani, S. R.; and Lakemeyer, G.\n\n\n \n\n\n\n In Poster and Demo Session at the 35th German Conference on Artificial Intelligence (KI 2012), September 24–27 2012. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{ Niemueller:Schiffer:EtAl:KI2012:BendIT,\n  author      = {Tim Niemueller and Stefan Schiffer and Albert Helligrath and Safoura Rezapour Lakani and Gerhard Lakemeyer},\n  title       = {{BendIT -- An Interactive Game with two Robots}},\n  booktitle   = {Poster and Demo Session at the 35th German Conference on Artificial Intelligence (KI 2012)},\n  year        = {2012},\n  month       = {September 24--27},\n  location    = {Saarbr{\\"u}cken, Germany},\n  OPTeditor      = {},\n  OPTnumber      = {},\n  OPTseries      = {},\n  OPTISSN        = {},\n  OPTpublisher   = {},\n  OPTaddress     = {},\n  OPTurl         = {},\n  abstract    = {In this paper we report on an interactive game with\n                 two robots and review its components. A human user\n                 uses his torso movements to steer a Robotino robot\n                 along a pre-defined course. Our domestic service\n                 robot Caesar acts as a referee and autonomously\n                 follows the Robotino and makes sure that it stays\n                 within a corridor along the path. If the user manages\n                 to keep the Robotino within the corridor for the\n                 whole path, he wins. The game can be used, for\n                 example, to engage people in physical training such\n                 as rehabilitation after an injury. It was designed\n                 and implemented as a student project in winter term\n                 2011/2012.},\n}\n
\n
\n\n\n
\n In this paper we report on an interactive game with two robots and review its components. A human user uses his torso movements to steer a Robotino robot along a pre-defined course. Our domestic service robot Caesar acts as a referee and autonomously follows the Robotino and makes sure that it stays within a corridor along the path. If the user manages to keep the Robotino within the corridor for the whole path, he wins. The game can be used, for example, to engage people in physical training such as rehabilitation after an injury. It was designed and implemented as a student project in winter term 2011/2012.\n
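The referee's corridor check can be sketched as a point-to-polyline distance test against half the corridor width; the course and width below are invented values, not those of the project:

# Toy corridor check: the game is lost once the steered robot leaves
# a corridor of the given width around the pre-defined path.

import math

def point_segment_dist(p, a, b):
    """Euclidean distance from point p to segment a-b."""
    ax, ay = a; bx, by = b; px, py = p
    dx, dy = bx - ax, by - ay
    if dx == dy == 0:
        return math.hypot(px - ax, py - ay)
    t = max(0.0, min(1.0, ((px - ax) * dx + (py - ay) * dy) / (dx * dx + dy * dy)))
    return math.hypot(px - (ax + t * dx), py - (ay + t * dy))

def inside_corridor(pos, path, width=0.6):
    """True while the robot stays within width/2 of the polyline path."""
    return min(point_segment_dist(pos, a, b)
               for a, b in zip(path, path[1:])) <= width / 2

course = [(0, 0), (2, 0), (2, 2)]
print(inside_corridor((1.0, 0.2), course))   # True: still on course
print(inside_corridor((1.0, 1.5), course))   # False: left the corridor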
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Plan Recognition by Program Execution in Continuous Temporal Domains.\n \n \n \n \n\n\n \n Schwering, C.; Beck, D.; Schiffer, S.; and Lakemeyer, G.\n\n\n \n\n\n\n In Burgard, W.; Konolige, K.; Pagnucco, M.; and Vassos, S., editor(s), Proceedings of the 8th International Cognitive Robotics Workshop (CogRob 2012), pages 77–84, 2012. AAAI Press\n \n\n\n\n
\n\n\n\n \n \n \"PlanPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{ Schwering:Beck:Schiffer:Lakemeyer:CogRob2012:PlaRaPeX,\n  author      = {Christoph Schwering and Daniel Beck and Stefan Schiffer and Gerhard Lakemeyer},\n  title       = {Plan Recognition by Program Execution in Continuous Temporal Domains},\n  booktitle   = {Proceedings of the 8th International Cognitive Robotics Workshop (CogRob 2012)},\n  pages       = {77--84},\n  year        = {2012},\n  editor      = {Wolfram Burgard and Kurt Konolige and Maurice Pagnucco and Stavros Vassos},\n  OPTnumber      = {},\n  OPTseries      = {AAAI Workshop Proceedings},\n  isbn        = {978-1-57735-571-7},\n  publisher   = {AAAI Press},\n  OPTaddress     = {},\n  location    = {Toronto, Canada},\n  aaaiurl     = {http://www.aaai.org/Workshops/ws12workshops.php#ws03},\n  url         = {https://www.aaai.org/ocs/index.php/WS/AAAIW12/paper/view/5281},\n  url_Accepted = {http://www.cse.unsw.edu.au/~cogrob/2012/accepted.html},\n  abstract    = {Much of the existing work on plan recognition assumes\n                 that actions of other agents can be observed\n                 directly. In continuous temporal domains such as\n                 traffic scenarios this assumption is typically not\n                 warranted. Instead, one is only able to observe facts\n                 about the world such as vehicle positions at\n                 different points in time, from which the agents'\n                 intentions need to be inferred. In this paper we show\n                 how this problem can be addressed in the situation\n                 calculus and a new variant of the action programming\n                 language Golog, which includes features such as\n                 continuous time and change, stochastic actions,\n                 nondeterminism, and concurrency.\n                 In our approach we match observations against a set\n                 of candidate plans in the form of Golog programs. We\n                 turn the observations into actions which are then\n                 executed concurrently with the given programs. Using\n                 decision-theoretic optimization techniques those\n                 programs are preferred which bring about the\n                 observations at the appropriate times. Besides\n                 defining this new variant of Golog we also discuss an\n                 implementation and experimental results using driving\n                 maneuvers as an example.},\n}\n
\n
\n\n\n
\n Much of the existing work on plan recognition assumes that actions of other agents can be observed directly. In continuous temporal domains such as traffic scenarios this assumption is typically not warranted. Instead, one is only able to observe facts about the world such as vehicle positions at different points in time, from which the agents' intentions need to be inferred. In this paper we show how this problem can be addressed in the situation calculus and a new variant of the action programming language Golog, which includes features such as continuous time and change, stochastic actions, nondeterminism, and concurrency. In our approach we match observations against a set of candidate plans in the form of Golog programs. We turn the observations into actions which are then executed concurrently with the given programs. Using decision-theoretic optimization techniques those programs are preferred which bring about the observations at the appropriate times. Besides defining this new variant of Golog we also discuss an implementation and experimental results using driving maneuvers as an example.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Flexible Command Interpretation on an Interactive Domestic Service Robot.\n \n \n \n\n\n \n Schiffer, S.; Hoppe, N.; and Lakemeyer, G.\n\n\n \n\n\n\n In Filipe, J.; and Fred, A., editor(s), ICAART 2012 - Proceedings of the 4th International Conference on Agents and Artificial Intelligence, volume 1 - Artificial Intelligence, pages 26–35, February 6-8 2012. SciTePress\n Best Student Paper Award\n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Schiffer:Hoppe:Lakemeyer:ICAART2012:FlexibleCommand,\n  author    = {Stefan Schiffer and Niklas Hoppe and Gerhard Lakemeyer},\n  title     = {Flexible Command Interpretation on an Interactive Domestic Service Robot},\n  year      = {2012},\n  pages     = {26--35},\n  editor    = {Joaquim Filipe and Ana Fred},\n  booktitle = {ICAART 2012 - Proceedings of the 4th International Conference\n              on Agents and Artificial Intelligence},\n  OPTvolume    = {Volume 1 - Artificial Intelligence},\n  volume    = {1 - Artificial Intelligence},\n  location  = {Algarve, Portugal},\n  month     = {February 6-8},\n  publisher = {SciTePress},\n  isbn      = {978-989-8425-95-9},\n  abstract  = {In this paper, we propose a system for robust and\n                  flexible command interpretation on a mobile robot in\n                  domestic service robotics applications.  Existing\n                  language processing for instructing a mobile robot\n                  often makes use of a simple, restricted grammar where\n                  precisely pre-defined utterances are directly mapped\n                  to system calls. This does not take into account\n                  fallibility of human users and only allows for\n                  binary processing; either a command is part of the\n                  grammar and hence understood correctly, or it is not\n                  part of the grammar and gets rejected. We model the\n                  language processing as an interpretation process\n                  where the utterance needs to be mapped to a robot's\n                  capabilities. We do so by casting the processing as\n                  a (decision-theoretic) planning problem on\n                  interpretatory actions. This allows for a flexible\n                  system that can resolve ambiguities and which is\n                  also capable of initiating steps to achieve\n                  clarification.},\n  note      = {Best Student Paper Award},\n}\n
\n
\n\n\n
\n In this paper, we propose a system for robust and flexible command interpretation on a mobile robot in domestic service robotics applications. Existing language processing for instructing a mobile robot often makes use of a simple, restricted grammar where precisely pre-defined utterances are directly mapped to system calls. This does not take into account fallibility of human users and only allows for binary processing; either a command is part of the grammar and hence understood correctly, or it is not part of the grammar and gets rejected. We model the language processing as an interpretation process where the utterance needs to be mapped to a robot's capabilities. We do so by casting the processing as a (decision-theoretic) planning problem on interpretatory actions. This allows for a flexible system that can resolve ambiguities and which is also capable of initiating steps to achieve clarification.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n \"An Evaluation Framework for Traffic Information Systems based on Data Streams\".\n \n \n \n \n\n\n \n Geisler, S.; Quix, C.; Schiffer, S.; and Jarke, M.\n\n\n \n\n\n\n Transportation Research Part C: Emerging Technologies, 23(Supplement C): 29–55. June 2012.\n Special Issue on Data Management in Vehicular Networks\n\n\n\n
\n\n\n\n \n \n \""AnPaper\n  \n \n \n \""An doi\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{ TRC2012Evaluation,\n  author       = {Sandra Geisler and Christoph Quix and Stefan Schiffer and Matthias Jarke},\n  title        = {An Evaluation Framework for Traffic Information Systems based on Data Streams},\n  journal      = {Transportation Research Part C: Emerging Technologies},\n  volume       = {23},\n  number       = {Supplement C},\n  note         = {Special Issue on Data Management in Vehicular Networks},\n  pages        = {29--55},\n  month        = jun,\n  year         = {2012},\n  url          = {http://www.sciencedirect.com/science/article/pii/S0968090X11001136},\n  doi          = {10.1016/j.trc.2011.08.003},\n  url_DOI      = {https://doi.org/10.1016/j.trc.2011.08.003},\n  keywords     = {Data streams, Car-to-X communication, Data stream mining},\n  abstract     = {Traffic information systems have to process and\n                  analyze huge amounts of data in real-time to\n                  effectively provide traffic information to road\n                  users. Progress in mobile communication technology\n                  with higher bandwidths and lower latencies enables\n                  the use of data provided by in-car sensors. Data\n                  stream management systems have been proposed to\n                  address the challenges of such applications which\n                  have to process a continuous data flow from various\n                  data sources in real-time. Data mining methods,\n                  adapted to data streams, can be used to analyze the\n                  data and to identify interesting patterns such as\n                  congestion or road hazards. Although several data\n                  stream mining methods have been proposed, an\n                  evaluation of such methods in the context of traffic\n                  applications is still missing. In this paper, we\n                  present an evaluation framework for traffic\n                  information systems based on data streams. We apply\n                  traffic simulation software to emulate the\n                  generation of traffic data by mobile probes. The\n                  framework is applied in two case studies, namely\n                  queue-end detection and traffic state\n                  estimation. The results show which parameters of the\n                  traffic information system significantly impact the\n                  accuracy of the predicted traffic information. This\n                  provides important findings for the design and\n                  implementation of traffic information systems using\n                  data from mobile probes.}, \n}\n
\n
\n\n\n
\n Traffic information systems have to process and analyze huge amounts of data in real-time to effectively provide traffic information to road users. Progress in mobile communication technology with higher bandwidths and lower latencies enables the use of data provided by in-car sensors. Data stream management systems have been proposed to address the challenges of such applications which have to process a continuous data flow from various data sources in real-time. Data mining methods, adapted to data streams, can be used to analyze the data and to identify interesting patterns such as congestion or road hazards. Although several data stream mining methods have been proposed, an evaluation of such methods in the context of traffic applications is still missing. In this paper, we present an evaluation framework for traffic information systems based on data streams. We apply traffic simulation software to emulate the generation of traffic data by mobile probes. The framework is applied in two case studies, namely queue-end detection and traffic state estimation. The results show which parameters of the traffic information system significantly impact the accuracy of the predicted traffic information. This provides important findings for the design and implementation of traffic information systems using data from mobile probes.\n
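One of the case studies, queue-end detection, can be caricatured as sliding-window stream mining over probe-vehicle speeds; the window size and threshold below are illustrative assumptions, not the framework's parameters:

# Sliding-window congestion flag over a stream of probe speed readings.

from collections import deque

class CongestionDetector:
    def __init__(self, window=5, threshold_kmh=20.0):
        self.speeds = deque(maxlen=window)
        self.threshold = threshold_kmh

    def update(self, speed_kmh):
        """Consume one probe reading; report True once the window is
        full and the mean windowed speed drops below the threshold."""
        self.speeds.append(speed_kmh)
        full = len(self.speeds) == self.speeds.maxlen
        return full and sum(self.speeds) / len(self.speeds) < self.threshold

detector = CongestionDetector()
stream = [80, 75, 60, 25, 15, 10, 8, 12]     # simulated probe speeds
for s in stream:
    if detector.update(s):
        print("possible queue end at reading", s)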
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Reasoning with Qualitative Positional Information for Domestic Domains in the Situation Calculus.\n \n \n \n \n\n\n \n Schiffer, S.; Ferrein, A.; and Lakemeyer, G.\n\n\n \n\n\n\n Journal of Intelligent and Robotic Systems. Special Issue on Domestic Service Robots in the Real World., 66(1–2): 273–300. 2012.\n \n\n\n\n
\n\n\n\n \n \n \"ReasoningPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{ Schiffer:Ferrein:Lakemeyer:JINT2011:FuzzyAtHome,\n author      = {Stefan Schiffer and Alexander Ferrein and Gerhard Lakemeyer},\n affiliation = {Knowledge-based Systems Group, RWTH Aachen University, Aachen, Germany},\n title       = {{Reasoning with Qualitative Positional Information\n               for Domestic Domains in the Situation Calculus}},\n journal     = {Journal of Intelligent and Robotic Systems.\n                {S}pecial Issue on Domestic Service Robots in the Real World.}, \n volume      = "66",\n number      = "1--2",\n pages       = {273--300},\n year        = {2012},\n editor      = {Luca Iocchi and Javier Ruiz-del-Solar and Tijn van der Zant},\n publisher   = {Springer},\n address     = {Netherlands},\n issn        = {0921-0296},\n DOI         = {10.1007/s10846-011-9606-0},\n keywords    = {Qualitative Spatial, Representation, Reasoning, Fuzzy, \n                Logic, Control, Domestic, Service, Robotics},\n abstract    = {In this paper, we present a thorough integration of \n                qualitative representations and reasoning for positional\n                information for domestic service robotics domains into\n                our high-level robot control.\n                In domestic settings for service robots like in the\n                RoboCup@Home competitions, complex tasks such as ``get\n                the cup from the kitchen and bring it to the living room''\n                or ``find me this and that object in the apartment'' have\n                to be accomplished. At these competitions the robots may\n                only be instructed by natural language. As humans use \n                qualitative concepts such as ``near'' or ``far'', the \n                robot needs to cope with them, too.\n                For our domestic robot, we use the robot programming and\n                plan language Readylog, our variant of Golog. In previous\n                work we extended the action language Golog, which was \n                developed for the high-level control of agents and robots,\n                with fuzzy set-based qualitative concepts. \n                We now extend our framework to positional fuzzy fluents \n                with an associated positional context called frames. With\n                that and our underlying reasoning mechanism we can transform\n                qualitative positional information from one context to \n                another to account for changes in context such as the point\n                of view or the scale.\n                We demonstrate how qualitative positional fluents based on\n                a fuzzy set semantics can be deployed in domestic domains\n                and showcase how reasoning with these qualitative notions\n                can seamlessly be applied to a fetch-and-carry task in a\n                RoboCup@Home scenario.},\n pdf         = {http://www.springerlink.com/content/t8526j2275827107/fulltext.pdf},\n url         = {http://www.springer.com/engineering/robotics/journal/10846},\n}\n
\n
\n\n\n
\n In this paper, we present a thorough integration of qualitative representations and reasoning for positional information for domestic service robotics domains into our high-level robot control. In domestic settings for service robots like in the RoboCup@Home competitions, complex tasks such as ``get the cup from the kitchen and bring it to the living room'' or ``find me this and that object in the apartment'' have to be accomplished. At these competitions the robots may only be instructed by natural language. As humans use qualitative concepts such as ``near'' or ``far'', the robot needs to cope with them, too. For our domestic robot, we use the robot programming and plan language Readylog, our variant of Golog. In previous work we extended the action language Golog, which was developed for the high-level control of agents and robots, with fuzzy set-based qualitative concepts. We now extend our framework to positional fuzzy fluents with an associated positional context called frames. With that and our underlying reasoning mechanism we can transform qualitative positional information from one context to another to account for changes in context such as the point of view or the scale. We demonstrate how qualitative positional fluents based on a fuzzy set semantics can be deployed in domestic domains and showcase how reasoning with these qualitative notions can seamlessly be applied to a fetch-and-carry task in a RoboCup@Home scenario.\n
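A hedged sketch of the frame idea from this abstract: the same qualitative ``near'' is evaluated relative to a frame (origin plus scale), so transforming between contexts amounts to re-evaluating the membership in the other frame. The frames and numbers are invented, not the paper's formalization:

# Qualitative position relative to a frame (origin + scale); the same
# point can be "near" at one scale and only weakly near at another.

def near(distance, scale):
    """Fuzzy 'near' whose extent grows with the frame's scale."""
    limit = 1.0 * scale                 # 'near' reaches out one scale unit
    return max(0.0, 1.0 - distance / limit)

ROOM  = {"origin": (0.0, 0.0), "scale": 4.0}   # hypothetical frames
TABLE = {"origin": (2.0, 1.0), "scale": 0.5}

def degree_of_near(point, frame):
    ox, oy = frame["origin"]
    d = ((point[0] - ox) ** 2 + (point[1] - oy) ** 2) ** 0.5
    return near(d, frame["scale"])

p = (2.1, 1.0)
print(round(degree_of_near(p, ROOM), 2))    # ~0.42: moderately near at room scale
print(round(degree_of_near(p, TABLE), 2))   # 0.8: strongly near at table scale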
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2011\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Fuzzy Representations and Control for Domestic Service Robots in Golog.\n \n \n \n \n\n\n \n Schiffer, S.; Ferrein, A.; and Lakemeyer, G.\n\n\n \n\n\n\n In Jeschke, S.; Liu, H.; and Schilberg, D., editor(s), Proceedings of the Fourth International Conference on Intelligent Robotics and Applications (ICIRA 2011), volume 7102, of Lecture Notes in Computer Science, pages 241–250, Berlin / Heidelberg, December 6-9 2011. Springer\n \n\n\n\n
\n\n\n\n \n \n \"FuzzyPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{ Schiffer:Ferrein:Lakemeyer:ICIRA2011:FuzzyAtHome,\n  author      = "Stefan Schiffer and Alexander Ferrein and Gerhard Lakemeyer",\n  affiliation = "RWTH Aachen University, Germany",\n  title       = "Fuzzy Representations and Control for Domestic Service Robots in Golog",\n  booktitle   = "Proceedings of the Fourth International Conference on\n                 Intelligent Robotics and Applications (ICIRA 2011)",\n  year        = "2011",\n  editor      = {Jeschke, Sabina and Liu, Honghai and Schilberg, Daniel},\n  publisher   = {Springer},\n  address     = {Berlin / Heidelberg},\n  isbn        = {978-3-642-25488-8},\n  pages       = {241--250},\n  series      = "Lecture Notes in Computer Science",\n  volume      = {7102},\n  url         = {http://dx.doi.org/10.1007/978-3-642-25489-5_24},\n  doi         = {10.1007/978-3-642-25489-5_24},\n  month       = "December 6-9",\n  location    = "Aachen, Germany",\n  abstract    = {In the RoboCup@Home domestic service robot\n                 competition, complex tasks such as ``get the cup from\n                 the kitchen and bring it to the living room'' or ``find\n                 me this and that object in the apartment'' have to be\n                 accomplished. At these competitions the robots may\n                 only be instructed by natural language. As humans use\n                 qualitative concepts such as ``near'' or ``far'', the\n                 robot needs to cope with them, too. For our domestic\n                 robot, we use the robot programming and plan language\n                 Readylog, our variant of Golog. In previous work we\n                 extended the action language Golog, which was\n                 developed for the high-level control of agents and\n                 robots, with fuzzy concepts and showed how to embed\n                 fuzzy controllers in Golog. In this paper, we\n                 demonstrate how these notions can be fruitfully\n                 applied to two domestic service robotic scenarios. In\n                 the first application, we demonstrate how qualitative\n                 fluents based on a fuzzy set semantics can be\n                 deployed. In the second program, we show an example\n                 of a fuzzy controller for a follow-a-person task.},\n}\n
\n
\n\n\n
\n In the RoboCup@Home domestic service robot competition, complex tasks such as ``get the cup from the kitchen and bring it to the living room'' or ``find me this and that object in the apartment'' have to be accomplished. At these competitions the robots may only be instructed by natural language. As humans use qualitative concepts such as ``near'' or ``far'', the robot needs to cope with them, too. For our domestic robot, we use the robot programming and plan language Readylog, our variant of Golog. In previous work we extended the action language Golog, which was developed for the high-level control of agents and robots, with fuzzy concepts and showed how to embed fuzzy controllers in Golog. In this paper, we demonstrate how these notions can be fruitfully applied to two domestic service robotic scenarios. In the first application, we demonstrate how qualitative fluents based on a fuzzy set semantics can be deployed. In the second program, we show an example of a fuzzy controller for a follow-a-person task.\n
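The follow-a-person controller mentioned last can be sketched as two fuzzy rules with weighted-average defuzzification; the membership shapes and speeds are assumptions for illustration, not the Golog controller from the paper:

# Toy fuzzy follow-a-person controller: distance in, drive speed out.

def mu_near(d):
    return max(0.0, min(1.0, (1.5 - d) / 1.0))     # fully 'near' below 0.5 m

def mu_far(d):
    return max(0.0, min(1.0, (d - 1.0) / 1.0))     # fully 'far' above 2.0 m

def follow_speed(distance_m):
    """Rules: IF near THEN stop (0 m/s); IF far THEN go (0.6 m/s).
    Defuzzify by the weighted average of the fired rule outputs."""
    w_near, w_far = mu_near(distance_m), mu_far(distance_m)
    if w_near + w_far == 0.0:
        return 0.3                                  # fallback cruise speed
    return (w_near * 0.0 + w_far * 0.6) / (w_near + w_far)

for d in (0.4, 1.2, 2.5):
    print(d, "->", round(follow_speed(d), 2), "m/s")

The overlapping memberships make the output speed change smoothly with distance, which is exactly what the qualitative rules are meant to buy over hard thresholds.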
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Modular Approach to Gesture Recognition for Interaction with a Domestic Service Robot.\n \n \n \n \n\n\n \n Schiffer, S.; Baumgartner, T.; and Lakemeyer, G.\n\n\n \n\n\n\n In Jeschke, S.; Liu, H.; and Schilberg, D., editor(s), Proceedings of the Fourth International Conference on Intelligent Robotics and Applications (ICIRA 2011), volume 7102, of Lecture Notes in Computer Science, pages 348–357, Berlin / Heidelberg, December 6–9 2011. Springer\n \n\n\n\n
\n\n\n\n \n \n \"ALink\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Schiffer:Baumgartner:Lakemeyer:ICIRA2011:ViGoR,\n  author    = {Stefan Schiffer and Tobias Baumgartner and Gerhard Lakemeyer},\n  title     = {A Modular Approach to Gesture Recognition\n               for Interaction with a Domestic Service Robot},\n  booktitle = {Proceedings of the Fourth International Conference on\n               Intelligent Robotics and Applications (ICIRA 2011)},\n  editor    = {Jeschke, Sabina and Liu, Honghai and Schilberg, Daniel},\n  volume    = {7102},\n  pages     = {348--357},\n  year      = {2011},\n  month     = "December 6--9",\n  location  = "Aachen, Germany",\n  publisher = {Springer},\n  address   = {Berlin / Heidelberg},\n  series    = {Lecture Notes in Computer Science},\n  isbn      = {978-3-642-25488-8},\n  ee        = {http://dx.doi.org/10.1007/978-3-642-25489-5_34},\n  doi       = {10.1007/978-3-642-25489-5_34},\n  abstract  = {In this paper, we propose a system for robust and\n               flexible visual gesture recognition on a mobile robot\n               for domestic service robotics applications. This adds\n               a simple yet powerful mode of interaction, especially\n               for the targeted user group of laymen and elderly or\n               disabled people in home environments. Existing\n               approaches often use a monolithic design, are\n               computationally expensive, rely on previously learned\n               (static) color models, or a specific initialization\n               procedure to start gesture recognition. We propose a\n               multi-step modular approach where we iteratively\n               reduce the search space while retaining flexibility\n               and extensibility. Building on a set of existing\n               approaches, we integrate an on-line color calibration\n               and adaptation mechanism for hand detection followed\n               by feature-based posture recognition. Finally, after\n               tracking the hand over time we adopt a simple yet\n               effective gesture recognition method that does not\n               require any training.},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a system for robust and flexible visual gesture recognition on a mobile robot for domestic service robotics applications. This adds a simple yet powerful mode of interaction, especially for the targeted user group of laymen and elderly or disabled people in home environments. Existing approaches often use a monolithic design, are computationally expensive, rely on previously learned (static) color models, or a specific initialization procedure to start gesture recognition. We propose a multi-step modular approach where we iteratively reduce the search space while retaining flexibility and extensibility. Building on a set of existing approaches, we integrate an on-line color calibration and adaptation mechanism for hand detection followed by feature-based posture recognition. Finally, after tracking the hand over time we adopt a simple yet effective gesture recognition method that does not require any training.\n
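The multi-step modular pipeline reads naturally as a chain of independently replaceable stages. Everything below is stubbed, with invented names and data shapes, purely to show that structure:

# Skeleton of a modular gesture pipeline: each stage narrows the
# search space and can be swapped without touching the others.

def calibrate_skin_color(frame):
    """On-line color calibration stage (stub)."""
    return {"hue": (0, 25)}                 # hypothetical skin-hue range

def detect_hands(frame, color_model):
    """Reduce the image to candidate hand regions (stub)."""
    return [{"bbox": (80, 60, 40, 40)}]

def classify_posture(region):
    """Feature-based posture recognition on one region (stub)."""
    return "open_hand"

def recognize_gesture(posture_track):
    """Training-free gesture decision over the tracked postures (stub)."""
    return "wave" if posture_track.count("open_hand") >= 3 else None

track = []
for frame in range(4):                      # stand-in for a camera stream
    model = calibrate_skin_color(frame)
    for region in detect_hands(frame, model):
        track.append(classify_posture(region))
print(recognize_gesture(track))             # -> "wave"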
AllemaniACs RoboCup@Home 2011 Team Description.
Schiffer, S.; and Lakemeyer, G.
Technical Report, 2011.
@techreport{ allemaniacs2011home,
  title        = {{AllemaniACs RoboCup@Home 2011 Team Description}},
  author       = {Stefan Schiffer and Gerhard Lakemeyer},
  affiliation  = {RWTH Aachen University, Germany},
  OPTbooktitle = {RoboCup German Open 2011 Team Description Papers},
  OPTaddress   = {Magdeburg, Germany},
  year         = {2011},
  keywords     = {RoboCup, Home, AllemaniACs, Team Description Paper},
}
2010 (4)
Fuzzy Representations and Control for Domestic Service Robots in Golog.
Schiffer, S.; Ferrein, A.; and Lakemeyer, G.
In Iocchi, L.; Ruiz-del-Solar, J.; and van der Zant, T., editor(s), Domestic Service Robots in the Real World. Workshop Proceedings of the International Conference on Simulation, Modeling and Programming for Autonomous Robots (SIMPAR 2010), pages 183–192, Darmstadt, Germany, November 15–18 2010.
@inproceedings{ Schiffer:Ferrein:Lakemeyer:SIMPAR2010:FuzzyAtHome,
  author    = {Stefan Schiffer and Alexander Ferrein and Gerhard Lakemeyer},
  title     = {{Fuzzy Representations and Control for Domestic Service Robots in Golog}},
  booktitle = {Domestic Service Robots in the Real World.
               Workshop Proceedings of the International Conference on
               Simulation, Modeling and Programming for Autonomous Robots (SIMPAR 2010)},
  pages     = {183--192},
  year      = {2010},
  month     = {November 15--18},
  address   = {Darmstadt, Germany},
  editor    = {Luca Iocchi and Javier Ruiz-del-Solar and Tijn van der Zant},
  isbn      = {978-3-00-032863-3},
  keywords  = {Fuzzy, Logic, Representations, Control, Domestic, Service, Robotics},
  url       = {http://www.dis.uniroma1.it/~iocchi/Events/SIMPAR10-ATHOME/},
  abstract  = {In the RoboCup@Home domestic robot competition, complex tasks
               such as ``get the cup from the kitchen and bring it to the
               living room'' or ``find me this and that object in the
               apartment'' have to be accomplished. At these competitions the
               robots may only be instructed by natural language. As humans
               use qualitative concepts such as ``near'' or ``far'', the
               robot needs to cope with them, too. For our domestic robot, we
               use the robot programming and plan language Readylog, our
               variant of Golog. In previous work we extended the action
               language Golog, which was developed for the high-level control
               of agents and robots, with fuzzy concepts and showed how to
               embed fuzzy controllers in Golog. In this paper, we demonstrate
               how these notions can be fruitfully applied to two
               RoboCup@Home scenarios. In the first application, we
               demonstrate how qualitative fluents based on a fuzzy set
               semantics can be deployed. In the second program, we show an
               example of a fuzzy controller for a follow-a-person task.
               While these programs have to be regarded as a proof-of-concept
               for the possibility to integrate qualitative concepts into
               Readylog beneficially for such applications, we aim at
               implementing these programs on our domestic robot platform in
               the future.},
}
In the RoboCup@Home domestic robot competition, complex tasks such as "get the cup from the kitchen and bring it to the living room" or "find me this and that object in the apartment" have to be accomplished. At these competitions the robots may only be instructed by natural language. As humans use qualitative concepts such as "near" or "far", the robot needs to cope with them, too. For our domestic robot, we use the robot programming and plan language Readylog, our variant of Golog. In previous work we extended the action language Golog, which was developed for the high-level control of agents and robots, with fuzzy concepts and showed how to embed fuzzy controllers in Golog. In this paper, we demonstrate how these notions can be fruitfully applied to two RoboCup@Home scenarios. In the first application, we demonstrate how qualitative fluents based on a fuzzy set semantics can be deployed. In the second program, we show an example of a fuzzy controller for a follow-a-person task. While these programs have to be regarded as a proof-of-concept for the possibility to integrate qualitative concepts into Readylog beneficially for such applications, we aim at implementing these programs on our domestic robot platform in the future.
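A minimal Python sketch of the follow-a-person idea, under stated assumptions: triangular memberships over distance, a three-rule base, and weighted-mean defuzzification. All ranges and speeds are invented; this only mirrors the fuzzy-controller concept, not the Readylog embedding.

def tri(x, a, b, c):
    """Triangular membership function over [a, c] peaking at b."""
    if x <= a or x >= c:
        return 0.0
    return (x - a) / (b - a) if x <= b else (c - x) / (c - b)

def follow_speed(distance_m):
    """Fuzzy rule base: near -> stop, good -> slow, far -> fast;
    defuzzified as the membership-weighted mean of the rule outputs."""
    mu = {
        "near": tri(distance_m, -0.5, 0.0, 0.8),
        "good": tri(distance_m, 0.4, 1.0, 1.8),
        "far":  tri(distance_m, 1.2, 2.5, 6.0),
    }
    speed_for = {"near": 0.0, "good": 0.3, "far": 0.8}  # m/s, invented values
    total = sum(mu.values()) or 1.0
    return sum(mu[k] * speed_for[k] for k in mu) / total

for d in (0.3, 1.0, 2.5):
    print(d, "m ->", round(follow_speed(d), 2), "m/s")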
A Data Stream-based Evaluation Framework for Traffic Information Systems.
Geisler, S.; Quix, C.; and Schiffer, S.
In Ali, M.; Hoel, E.; and Shahabi, C., editor(s), Proceedings of the 1st ACM SIGSPATIAL International Workshop on GeoStreaming (IWGS 2010), pages 11–18, San Jose, CA, USA, November 2 2010. ACM.
@InProceedings{ Geisler:Quix:Schiffer:IWGS2010:QueueEndDetection,\n  author       = {Sandra Geisler and Christoph Quix and Stefan Schiffer},\n  title        = {{A Data Stream-based Evaluation Framework\n                   for Traffic Information Systems}},\n  booktitle    = {Proceedings of the 1st ACM SIGSPATIAL\n                  International Workshop on GeoStreaming (IWGS 2010)},\n  pages        = {11--18},\n  year         = {2010},\n  month        = {November 2},\n  address      = {San Jose, CA, USA},\n  editor       = {Mohamed Ali and Erik Hoel and Cyrus Shahabi},\n  publisher    = {ACM},\n  OPTisbn         = {},\n  keywords     = {Data-Streams, Stream Mining, Traffic Information Systems},\n  abstract     = {Traffic information systems based on mobile, in-car\n                  sensor technology are a challenge for data\n                  management systems as a huge amount of data has to\n                  be processed in real-time. Data mining methods must\n                  be adapted to cope with these challenges in handling\n                  streaming data. Although several data stream mining\n                  methods have been proposed, an evaluation of such\n                  methods in the context of traffic applications is\n                  yet missing. In this paper, we present an evaluation\n                  framework for data stream mining for traffic\n                  applications. We apply a traffic simulation software\n                  to emulate the generation of traffic data by mobile\n                  probes. The framework is evaluated in a first case\n                  study, namely queue-end detection. We show first\n                  results of the evaluation of a data stream mining\n                  method, using multiple parameters for the traffic\n                  simulation. The goal of our work is to identify\n                  parameter settings for which the data stream mining\n                  methods produce useful results for the traffic\n                  application at hand.},\n pdf          = {},\n}\n
Traffic information systems based on mobile, in-car sensor technology are a challenge for data management systems as a huge amount of data has to be processed in real-time. Data mining methods must be adapted to cope with these challenges in handling streaming data. Although several data stream mining methods have been proposed, an evaluation of such methods in the context of traffic applications is still missing. In this paper, we present an evaluation framework for data stream mining for traffic applications. We apply traffic simulation software to emulate the generation of traffic data by mobile probes. The framework is evaluated in a first case study, namely queue-end detection. We show first results of the evaluation of a data stream mining method, using multiple parameters for the traffic simulation. The goal of our work is to identify parameter settings for which the data stream mining methods produce useful results for the traffic application at hand.
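A toy stand-in for the evaluation loop the abstract describes (not the authors' framework): simulated probe speeds play the role of the traffic simulator, a simple windowed detector plays the role of the stream mining method, and a parameter sweep compares estimates against ground truth. All numbers are invented.

import random

def simulate_probe_speeds(n, queue_end_at, seed=0):
    """Toy traffic simulator: free flow before the queue end, jam after."""
    rng = random.Random(seed)
    return [(i, rng.gauss(27.0, 2.0) if i < queue_end_at else rng.gauss(2.0, 1.0))
            for i in range(n)]

def detect_queue_end(stream, window=10, threshold=8.0):
    """Flag the first position where the windowed mean speed drops below threshold."""
    buf = []
    for pos, speed in stream:
        buf.append(speed)
        if len(buf) > window:
            buf.pop(0)
        if len(buf) == window and sum(buf) / window < threshold:
            return pos
    return None

truth = 600
for window in (5, 10, 20):  # parameter sweep, as in the framework's evaluation idea
    est = detect_queue_end(simulate_probe_speeds(1000, truth), window=window)
    print(window, est, abs(est - truth))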
Self-Maintenance for Autonomous Robots in the Situation Calculus.
Schiffer, S.; Wortmann, A.; and Lakemeyer, G.
In Lakemeyer, G.; Levesque, H. J.; and Pirri, F., editor(s), Cognitive Robotics, Dagstuhl Seminar Proceedings, Dagstuhl, Germany, June 16–17 2010. Schloss Dagstuhl - Leibniz-Zentrum fuer Informatik, Germany.
@InProceedings{ SchifferEtAl:CogRob2010:SMaRT,\n  author    = {Stefan Schiffer and Andreas Wortmann and Gerhard Lakemeyer},\n  title     = {{Self-Maintenance for Autonomous Robots in the Situation Calculus}},\n  booktitle = {Cognitive Robotics},\n  year      = {2010},\n  month     = {June 16-17},\n  editor    = {Gerhard Lakemeyer and Hector J. Levesque and Fiora Pirri},\n  number    = {10081},\n  series    = {Dagstuhl Seminar Proceedings},\n  ISSN      = {1862-4405},\n  publisher = {Schloss Dagstuhl - Leibniz-Zentrum fuer Informatik, Germany},\n  address   = {Dagstuhl, Germany},\n  URL       = {http://drops.dagstuhl.de/opus/volltexte/2010/2636},\n  annote    = {Keywords: Domestic mobile robotics, self-maintenance, robustness},\n  abstract  = { In order to make a robot execute a given task plan\n                  more robustly we want to enable it to take care of\n                  its self-maintenance requirements during online\n                  execution of this program. This requires the robot\n                  to know about the (internal) states of its\n                  components, constraints that restrict execution of\n                  certain actions and possibly also how to recover\n                  from faulty situations. The general idea is to\n                  implement a transformation process on the plans,\n                  which are specified in the agent programming\n                  language ReadyLog, to be performed based on explicit\n                  (temporal) constraints. Afterwards, a 'guarded'\n                  execution of the transformed program should result\n                  in more robust behavior.},\n}\n
In order to make a robot execute a given task plan more robustly we want to enable it to take care of its self-maintenance requirements during online execution of this program. This requires the robot to know about the (internal) states of its components, constraints that restrict execution of certain actions and possibly also how to recover from faulty situations. The general idea is to implement a transformation process on the plans, which are specified in the agent programming language ReadyLog, to be performed based on explicit (temporal) constraints. Afterwards, a 'guarded' execution of the transformed program should result in more robust behavior.
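The actual transformation operates on ReadyLog programs; as a loose analogue only, the Python sketch below shows the guard-and-recover idea on a plain action list. Every name (dock_and_charge, the battery constraint) is a hypothetical example, not taken from the paper.

def transform(plan, constraints, maintenance):
    """Pair every plan step with its constraint (if any) and the
    maintenance action to run when that constraint is violated."""
    return [(a, constraints.get(a), maintenance.get(a)) for a in plan]

def execute(guarded_plan, state, recover):
    """'Guarded' execution: check the constraint right before each step
    and recover first if it would be violated."""
    for action, guard, fix in guarded_plan:
        if guard is not None and not guard(state) and fix is not None:
            recover(fix, state)
        print("do:", action)

def recover(fix, state):
    print("maintenance:", fix)
    if fix == "dock_and_charge":  # invented recovery behaviour
        state["battery"] = 1.0

state = {"battery": 0.15}
guarded = transform(
    ["goto_kitchen", "grasp_cup", "goto_living_room"],
    constraints={"goto_living_room": lambda s: s["battery"] > 0.2},
    maintenance={"goto_living_room": "dock_and_charge"},
)
execute(guarded, state, recover)  # recharges before the last step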
Self-Maintenance for Autonomous Robots controlled by ReadyLog.
Schiffer, S.; Wortmann, A.; and Lakemeyer, G.
In Ingrand, F.; and Guiochet, J., editor(s), Proceedings of the 7th IARP Workshop on Technical Challenges for Dependable Robots in Human Environments, pages 101–107, Toulouse, France, June 16–17 2010.
@inproceedings{ SchifferEtAl:DRHE2010:SMaRT,
  author    = {Stefan Schiffer and Andreas Wortmann and Gerhard Lakemeyer},
  title     = {{Self-Maintenance for Autonomous Robots controlled by ReadyLog}},
  booktitle = {Proceedings of the 7th IARP Workshop on Technical Challenges
               for Dependable Robots in Human Environments},
  pages     = {101--107},
  year      = {2010},
  month     = {June 16--17},
  address   = {Toulouse, France},
  editor    = {Felix Ingrand and Jeremie Guiochet},
  keywords  = {Self-Maintenance, Autonomous Robots, Situation Calculus, ReadyLog},
  abstract  = {In order to make a robot execute a given task plan more
               robustly we want to enable it to take care of its
               self-maintenance requirements during online execution of this
               plan. This requires the robot to know about the (internal)
               states of its components, constraints that restrict execution
               of actions and how to recover from faulty situations. The
               general idea is to implement a transformation process on the
               plans, which are specified in the agent programming language
               ReadyLog, to be performed based on explicit qualitative
               temporal constraints. Afterwards, a 'guarded' execution of the
               transformed program results in more robust behavior.},
}
In order to make a robot execute a given task plan more robustly we want to enable it to take care of its self-maintenance requirements during online execution of this plan. This requires the robot to know about the (internal) states of its components, constraints that restrict execution of actions and how to recover from faulty situations. The general idea is to implement a transformation process on the plans, which are specified in the agent programming language ReadyLog, to be performed based on explicit qualitative temporal constraints. Afterwards, a 'guarded' execution of the transformed program results in more robust behavior.
2009 (5)
RoboCup@Home: Scientific Competition and Benchmarking for Domestic Service Robots.
Wisspeintner, T.; van der Zant, T.; Iocchi, L.; and Schiffer, S.
Interaction Studies. Special Issue on Robots in the Wild, 10(3): 392–426. 2009.
@article{ JISSI2009AtHome, \n  author    = "Thomas Wisspeintner and Tijn van der Zant and Luca Iocchi and Stefan Schiffer",\n  title     = "RoboCup@Home: {S}cientific {C}ompetition and {B}enchmarking for {D}omestic {S}ervice {R}obots",\n  journal   = "Interaction Studies. {S}pecial Issue on Robots in the Wild",\n  editor    = "Kerstin Dautenhahn and Angelo Cangelosi",\n  year      = "2009",\n  volume    = "10",\n  number    = "3",\n  pages     = "392--426",\n  doi       = "10.1075/is.10.3.06wis",\n  url       = "http://www.benjamins.com/cgi-bin/t_articles.cgi?bookid=IS\\%2010\\%3A3&artid=125131656",\n  publisher = "John Benjamins Publishing",\n  abstract  = "Being part of the RoboCup initiative, the RoboCup@Home\n                  league targets the development and deployment of\n                  autonomous service and assistive robot technology\n                  being essential for future personal domestic\n                  applications.  The domain of domestic service and\n                  assistive robotics implicates a wide range of\n                  possible problems. The primary reasons for this\n                  include the large amount of uncertainty in the\n                  dynamic and non-standardized environments of the\n                  real world, and the related human\n                  interaction. Furthermore, the application\n                  orientation requires a large effort towards high\n                  level integration combined with a demand for general\n                  robustness of the systems. This article details the\n                  need for interdisciplinary community effort to\n                  iteratively identify related problems, to define\n                  benchmarks, to test and, finally, to solve the\n                  problems. The concepts and the implementation of the\n                  RoboCup@Home initiative as a combination of\n                  scientific exchange and competition is presented as\n                  an efficient method to accelerate and focus\n                  technological and scientific progress in the domain\n                  of domestic service robots. Finally, the progress in\n                  terms of performance increase in the benchmarks and\n                  technological advancements is evaluated and\n                  discussed.",\n  keywords = "Domestic Service Robotics, Application, Uncertainty, Benchmark, Competition, Human-Robot Interaction, RoboCup@Home",\n}\n\n
Being part of the RoboCup initiative, the RoboCup@Home league targets the development and deployment of autonomous service and assistive robot technology being essential for future personal domestic applications. The domain of domestic service and assistive robotics implicates a wide range of possible problems. The primary reasons for this include the large amount of uncertainty in the dynamic and non-standardized environments of the real world, and the related human interaction. Furthermore, the application orientation requires a large effort towards high level integration combined with a demand for general robustness of the systems. This article details the need for interdisciplinary community effort to iteratively identify related problems, to define benchmarks, to test and, finally, to solve the problems. The concepts and the implementation of the RoboCup@Home initiative as a combination of scientific exchange and competition is presented as an efficient method to accelerate and focus technological and scientific progress in the domain of domestic service robots. Finally, the progress in terms of performance increase in the benchmarks and technological advancements is evaluated and discussed.
Embedding Fuzzy Controllers into Golog.
Ferrein, A.; Schiffer, S.; and Lakemeyer, G.
In Proceedings of the IEEE International Conference on Fuzzy Systems (FUZZ-IEEE'09), pages 894–899, August 20–24 2009. IEEE.
@inproceedings{ Ferrein:Schiffer:Lakemeyer:2009:FUZZ-IEEE:Pole,\n  author      = "Alexander Ferrein and Stefan Schiffer and Gerhard Lakemeyer",\n  affiliation = "RWTH Aachen University, Germany",\n  title       = "Embedding Fuzzy Controllers into Golog",\n  booktitle   = "Proceedings of the IEEE International Conference on Fuzzy Systems (FUZZ-IEEE'09)",\n  pages       = "894--899",\n  year        = "2009",\n  month       = "August 20-24",\n  location    = "ICC Jeju, Jeju Island, Korea",\n  publisher   = "IEEE",\n  doi         = {10.1109/FUZZY.2009.5277161},\n  ISSN        = {1098-7584},\n}\n
Robust Collision Avoidance in Unknown Domestic Environments.
Jacobs, S.; Ferrein, A.; Schiffer, S.; Beck, D.; and Lakemeyer, G.
In Proceedings of the International RoboCup Symposium 2009 (RoboCup 2009), volume 5949 of LNCS, pages 116–127, June 30 – July 3 2009. Springer.
@inproceedings{ Jacobs:Ferrein:Schiffer:Beck:Lakemeyer:2009:RoboCup:Colli,
  author      = {Stefan Jacobs and Alexander Ferrein and Stefan Schiffer
                 and Daniel Beck and Gerhard Lakemeyer},
  affiliation = {RWTH Aachen University, Germany},
  title       = {Robust Collision Avoidance in Unknown Domestic Environments},
  booktitle   = {Proceedings of the International RoboCup Symposium 2009 (RoboCup 2009)},
  pages       = {116--127},
  year        = {2009},
  month       = {June 30 -- July 3},
  location    = {Graz, Austria},
  publisher   = {Springer},
  series      = {LNCS},
  volume      = {5949},
  isbn        = {978-3-642-11875-3},
  issn        = {0302-9743 (Print) 1611-3349 (Online)},
  doi         = {10.1007/978-3-642-11876-0_11},
  abstract    = {Service robots operating in domestic indoor environments
                 must be endowed with a safe collision avoidance and
                 navigation method that is reactive enough to avoid contacts
                 with the furniture of the apartment and humans that suddenly
                 appear in front of the robot. Moreover, the method should be
                 local, i.e. should not need a predefined map of the
                 environment. In this paper we describe a navigation and
                 collision avoidance method which is all of that: safe, fast,
                 and local. Based on a geometric grid representation which is
                 derived from the laser range finder of our domestic robot, a
                 path to the next target point is found by employing A*. The
                 obstacles which are used in the local map of the robot are
                 extended depending on the speed the robot travels at. We
                 compute a triangular area in front of the robot which is
                 guaranteed to be free of obstacles. This triangle serves as
                 the space of feasible solutions when searching for the next
                 drive commands. With this triangle, we are able to decouple
                 the path search from the search for drive commands, which
                 tremendously decreases the complexity. We used the proposed
                 method for several years in RoboCup@Home where it was a key
                 factor to our success in the competitions.},
}
Service robots operating in domestic indoor environments must be endowed with a safe collision avoidance and navigation method that is reactive enough to avoid contacts with the furniture of the apartment and humans that suddenly appear in front of the robot. Moreover, the method should be local, i.e. should not need a predefined map of the environment. In this paper we describe a navigation and collision avoidance method which is all of that: safe, fast, and local. Based on a geometric grid representation which is derived from the laser range finder of our domestic robot, a path to the next target point is found by employing A*. The obstacles which are used in the local map of the robot are extended depending on the speed the robot travels at. We compute a triangular area in front of the robot which is guaranteed to be free of obstacles. This triangle serves as the space of feasible solutions when searching for the next drive commands. With this triangle, we are able to decouple the path search from the search for drive commands, which tremendously decreases the complexity. We used the proposed method for several years in RoboCup@Home where it was a key factor to our success in the competitions.
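A minimal sketch of two geometric ideas from the abstract, with invented parameters and simplified geometry (this is not the paper's implementation): obstacles are inflated proportionally to the robot's speed, and a sampled triangular region ahead of the robot is checked for being obstacle-free.

import math

def inflate(obstacles, speed, base=0.3, k=0.4):
    """Grow each obstacle's radius with speed (speed-dependent extension idea)."""
    r = base + k * speed
    return [(x, y, r) for x, y in obstacles]

def point_free(px, py, inflated):
    return all(math.hypot(px - x, py - y) > r for x, y, r in inflated)

def free_triangle(robot, heading, inflated, length=2.0, half_width=0.5, samples=50):
    """Sample the center line and edges of a triangle with its apex at the
    robot; the area counts as free iff no sample hits an inflated obstacle."""
    rx, ry = robot
    for i in range(1, samples + 1):
        d = length * i / samples
        w = half_width * d / length  # the triangle widens away from the robot
        for s in (-1.0, 0.0, 1.0):
            px = rx + d * math.cos(heading) - s * w * math.sin(heading)
            py = ry + d * math.sin(heading) + s * w * math.cos(heading)
            if not point_free(px, py, inflated):
                return False
    return True

obs = inflate([(1.5, 0.1)], speed=1.0)
print(free_triangle((0.0, 0.0), 0.0, obs))  # False: obstacle sits in the corridor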
RoboCup@Home: Results in Benchmarking Domestic Service Robots.
Wisspeintner, T.; van der Zant, T.; Iocchi, L.; and Schiffer, S.
In Proceedings of the International RoboCup Symposium 2009 (RoboCup 2009), volume 5949 of LNCS, pages 390–401, June 30 – July 3 2009. Springer.
@inproceedings{ Wisspeintner:Zant:Iocchi:Schiffer:2009:RoboCup:Home,\n  author      = "Thomas Wisspeintner and Tijn van der Zant \n                 and Luca Iocchi and Stefan Schiffer",\n  title       = "RoboCup@Home: Results in Benchmarking Domestic Service Robots",\n  booktitle   = "Proceedings of the International RoboCup Symposium 2009 (RoboCup 2009)",\n  pages       = "390--401",\n  year        = "2009",\n  month       = "June 30 -- July 3",\n  location    = "Graz, Austria",\n  publisher   = "Springer",\n  series      = "LNCS",\n  volume      = "5949",\n  number      = "",\n  isbn        = "978-3-642-11875-3",\n  ISSN        = "0302-9743 (Print) 1611-3349 (Online)",\n  doi         = "10.1007/978-3-642-11876-0_34",\n  note        = "",\n  abstract    = " Benchmarking robotic technologies is of utmost\n                  importance for actual deployment of robotic\n                  applications in industrial and every-day\n                  environments, therefore many efforts have recently\n                  focused on this problem. Among the many different\n                  ways of benchmarking robotic systems, scientific\n                  competitions are recognized as one of the most\n                  effective ways of rapid development of scientific\n                  progress in a field.  The RoboCup@Home league\n                  targets the development and deployment of autonomous\n                  service and assistive robot technology, being\n                  essential for future personal domestic applications,\n                  and offers an important approach to benchmarking\n                  domestic and service robots.  In this paper we\n                  present the new methodology for benchmarking DSR\n                  adopted in RoboCup@Home, that includes the\n                  definition of multiple benchmarks (tests) and of\n                  performance metrics based on the relationships\n                  between key abilities required to the robots and the\n                  tests. We also discuss the results of our\n                  benchmarking approach over the past years and\n                  provide an outlook on short- and mid-term goals of\n                  @Home and of DSR in general.",\n}\n\n
Benchmarking robotic technologies is of utmost importance for actual deployment of robotic applications in industrial and every-day environments, therefore many efforts have recently focused on this problem. Among the many different ways of benchmarking robotic systems, scientific competitions are recognized as one of the most effective ways of rapid development of scientific progress in a field. The RoboCup@Home league targets the development and deployment of autonomous service and assistive robot technology, being essential for future personal domestic applications, and offers an important approach to benchmarking domestic and service robots. In this paper we present the new methodology for benchmarking DSR adopted in RoboCup@Home, which includes the definition of multiple benchmarks (tests) and of performance metrics based on the relationships between key abilities required of the robots and the tests. We also discuss the results of our benchmarking approach over the past years and provide an outlook on short- and mid-term goals of @Home and of DSR in general.
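To make the ability-based scoring idea concrete, here is a toy numeric sketch (all weights and scores are invented, not the league's actual metrics): each test exercises several abilities with different weights, and a team's ability profile is the weight-normalized aggregate of its test sub-scores.

import numpy as np

abilities = ["navigation", "manipulation", "speech"]
tests = ["follow_me", "fetch_and_carry"]
W = np.array([[0.7, 0.0, 0.3],    # follow_me
              [0.4, 0.5, 0.1]])   # fetch_and_carry
team_scores = np.array([0.8, 0.6])  # normalized test scores for one team

# Aggregate per-ability performance, normalized by total ability weight.
profile = team_scores @ W / W.sum(axis=0)
print(dict(zip(abilities, profile.round(2))))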
AllemaniACs@Home 2009 Team Description.
Schiffer, S.; Niemüller, T.; Doostdar, M.; and Lakemeyer, G.
In Proceedings CD RoboCup 2009, Graz, Austria, 2009.
@inproceedings{ allemaniacs2009home,\n  title       = {{AllemaniACs@Home 2009 Team Description}},\n  author      = {Stefan Schiffer and Tim Niem{\\"u}ller and Masrur Doostdar and Gerhard Lakemeyer},\n  affiliation = "RWTH Aachen University, Germany",\n  booktitle   = {Proceedings CD RoboCup 2009},\n  address     = {Graz, Austria},\n  year        = {2009},\n  keywords    = {RoboCup, Home, AllemaniACs, Team Description Paper},\n}\n
2008 (6)
RoboCup@Home 2008: Analysis of results.
Wisspeintner, T.; van der Zant, T.; Iocchi, L.; and Schiffer, S.
Technical Report, 2008.
@techreport{ robocupathome2008result,
  title    = {{RoboCup@Home 2008: Analysis of results}},
  author   = {Wisspeintner, Thomas and van der Zant, Tijn and Iocchi, Luca and Schiffer, Stefan},
  year     = {2008},
  url      = {http://www.dis.uniroma1.it/~iocchi/publications/robocupathome2008_short.pdf},
  abstract = {This report describes an analysis of the performance of teams
              participating at the RoboCup@Home 2008 competition in Suzhou,
              China. The analysis has been performed by defining a set of key
              abilities that are required of RoboCup@Home teams, by relating
              them with sub-scores of the tests and by measuring team
              performance on such abilities. Results are useful to evaluate
              the performance of teams, the difficulty of each ability in the
              tests and to plan changes in the tests.},
}
This report describes an analysis of the performance of teams participating at the RoboCup@Home 2008 competition in Suzhou, China. The analysis has been performed by defining a set of key abilities that are required of RoboCup@Home teams, by relating them with sub-scores of the tests and by measuring team performance on such abilities. Results are useful to evaluate the performance of teams, the difficulty of each ability in the tests and to plan changes in the tests.
Robust Speech Recognition for Service Robotics Applications.
Doostdar, M.; Schiffer, S.; and Lakemeyer, G.
In Proceedings of the International RoboCup Symposium 2008 (RoboCup 2008), volume 5399 of LNCS, pages 1–12, July 14–18 2008. Springer.
Best Student Paper Award.
@inproceedings{ Doostdar:Schiffer:Lakemeyer:2008:RoboCup:RoiSpeR,\n  author      = "Masrur Doostdar and Stefan Schiffer and Gerhard Lakemeyer",\n  affiliation = "RWTH Aachen University, Germany",\n  title       = "Robust Speech Recognition for Service Robotics Applications",\n  booktitle   = "Proceedings of the International RoboCup Symposium 2008 (RoboCup 2008)",\n  pages       = "1--12",\n  year        = "2008",\n  month       = "July 14-18",\n  location    = "Suzhou, China",\n  publisher   = "Springer",\n  series      = "LNCS",\n  volume      = "5399",\n  number      = "",\n  isbn        = "978-3-642-02920-2",\n  ISSN        = "0302-9743 (Print) 1611-3349 (Online)",\n  doi         = "10.1007/978-3-642-02921-9_1",\n  note        = "Best Student Paper Award",\n  abstract    = "Mobile service robots in human environments need to\n                  have versatile abilities to perceive and to interact\n                  with their environment.  Spoken language is a\n                  natural way to interact with a robot, in general,\n                  and to instruct it, in particular. However, most\n                  existing speech recognition systems often suffer\n                  from high environmental noise present in the target\n                  domain and they require in-depth knowledge of the\n                  underlying theory in case of necessary adaptation to\n                  reach the desired accuracy.  We propose and evaluate\n                  an architecture for a robust speaker independent\n                  speech recognition system using off-the-shelf\n                  technology and simple additional methods. We first\n                  use close speech detection to segment closed\n                  utterances which alleviates the recognition process.\n                  By further utilizing a combination of an FSG based\n                  and an $N$-gram based speech decoder we reduce false\n                  positive recognitions while achieving high\n                  accuracy.",\n}\n
Mobile service robots in human environments need to have versatile abilities to perceive and to interact with their environment. Spoken language is a natural way to interact with a robot, in general, and to instruct it, in particular. However, most existing speech recognition systems often suffer from high environmental noise present in the target domain and they require in-depth knowledge of the underlying theory in case of necessary adaptation to reach the desired accuracy. We propose and evaluate an architecture for a robust speaker-independent speech recognition system using off-the-shelf technology and simple additional methods. We first use close speech detection to segment closed utterances which alleviates the recognition process. By further utilizing a combination of an FSG-based and an N-gram-based speech decoder we reduce false positive recognitions while achieving high accuracy.
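The dual-decoder idea can be sketched generically: accept the grammar-restricted (FSG) hypothesis only if the open-vocabulary N-gram decoder heard roughly the same thing. The decoders are abstracted here as plain strings and the similarity threshold is invented; this illustrates the acceptance logic only, not the paper's system.

from difflib import SequenceMatcher

def accept(fsg_hyp: str, ngram_hyp: str, threshold: float = 0.6) -> bool:
    """Accept the FSG hypothesis only if the N-gram decoder's transcript is
    similar enough; otherwise treat it as a false positive triggered by
    out-of-grammar speech or noise."""
    similarity = SequenceMatcher(None, fsg_hyp.split(), ngram_hyp.split()).ratio()
    return similarity >= threshold

print(accept("robot go to the kitchen", "robot go to the kitchen"))  # True
print(accept("robot go to the kitchen", "uh the weather is nice"))   # False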
A Fuzzy Set Semantics for Qualitative Fluents in the Situation Calculus.
Ferrein, A.; Schiffer, S.; and Lakemeyer, G.
In Proceedings of the International Conference on Intelligent Robotics and Applications (ICIRA'08), LNCS, pages 498–509, October 15–17 2008. Springer.
@inproceedings{ Ferrein:Schiffer:Lakemeyer:2008:ICIRA:Fuzzy,
  author      = {Alexander Ferrein and Stefan Schiffer and Gerhard Lakemeyer},
  affiliation = {RWTH Aachen University, Germany},
  title       = {A Fuzzy Set Semantics for Qualitative Fluents in the Situation Calculus},
  booktitle   = {Proceedings of the International Conference on Intelligent
                 Robotics and Applications (ICIRA'08)},
  pages       = {498--509},
  year        = {2008},
  month       = {October 15--17},
  location    = {Wuhan, China},
  publisher   = {Springer},
  series      = {LNCS},
  isbn        = {978-3-540-88512-2},
  doi         = {10.1007/978-3-540-88513-9_54},
  abstract    = {Specifying the behavior of an intelligent autonomous robot
                 or agent is a non-trivial task. The question is: how can the
                 knowledge of the domain expert be encoded in the agent
                 program? Qualitative representations in general facilitate
                 expressing the knowledge of a domain expert. In this paper,
                 we propose a semantics for qualitative fluents in the
                 situation calculus. Our semantics is based on fuzzy sets.
                 Membership functions define to which degree a qualitative
                 fluent belongs to a particular category. Especially
                 intriguing about a fuzzy logic semantics for qualitative
                 fluents is that the qualitative ranges may overlap, and a
                 value can, at the same time, fall into several categories.},
}
Specifying the behavior of an intelligent autonomous robot or agent is a non-trivial task. The question is: how can the knowledge of the domain expert be encoded in the agent program? Qualitative representations in general facilitate expressing the knowledge of a domain expert. In this paper, we propose a semantics for qualitative fluents in the situation calculus. Our semantics is based on fuzzy sets. Membership functions define to which degree a qualitative fluent belongs to a particular category. Especially intriguing about a fuzzy logic semantics for qualitative fluents is that the qualitative ranges may overlap, and a value can, at the same time, fall into several categories.
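A small worked example of the overlap property (the category ranges are invented, not taken from the paper): with trapezoidal membership functions, one distance value can belong to several qualitative categories at once, each to a different degree.

def trapezoid(x, a, b, c, d):
    """Trapezoidal membership: rises on [a,b], is 1 on [b,c], falls on [c,d]."""
    if x <= a or x >= d:
        return 0.0
    if b <= x <= c:
        return 1.0
    return (x - a) / (b - a) if x < b else (d - x) / (d - c)

# Overlapping qualitative distance categories (ranges are invented):
CATEGORIES = {
    "near":   (-0.1, 0.0, 0.5, 1.5),
    "medium": (0.8, 1.5, 2.5, 3.5),
    "far":    (2.8, 4.0, 99.0, 100.0),
}

def degrees_of(distance):
    return {name: round(trapezoid(distance, *p), 2) for name, p in CATEGORIES.items()}

# A distance of 1.2 m is 'near' to degree 0.3 and 'medium' to degree 0.57:
print(degrees_of(1.2))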
Randomized Trees for Real-Time One-Step Face Detection and Recognition.
Belle, V.; Deselaers, T.; and Schiffer, S.
In Proceedings of the 19th International Conference on Pattern Recognition (ICPR'08), pages 1–4, December 8–11 2008. IEEE Computer Society.
@inproceedings{ Belle:Deselaers:Schiffer:2008:ICPR:RFF,\n  author       = "Vaishak Belle and Thomas Deselaers and Stefan Schiffer",\n  affiliation  = "RWTH Aachen University, Germany",\n  title        = "Randomized Trees for Real-Time One-Step Face Detection and Recognition",\n  booktitle    = "Proceedings of the 19th International Conference on Pattern Recognition (ICPR'08)",\n  pages        = "1--4",\n  year         = "2008",\n  month        = "December 8-11",\n  location     = "Tampa, Florida, USA",\n  publisher    = "{IEEE Computer Society}",\n  isbn         = "978-1-4244-2175-6",\n  ee           = {http://dx.doi.org/10.1109/ICPR.2008.4761365},\n  abstract     = "We present a system for detecting and recognizing\n                  faces in images in real-time which is able to learn\n                  new identities in instants.  In mobile service\n                  robotics, interaction with persons is becoming\n                  increasingly important, real-time performance is\n                  required and the introduction of new persons is a\n                  necessary feature for many applications.  Although\n                  face detection and face recognition are well\n                  studied, only a few papers address both problems\n                  jointly and only few systems are able to learn to\n                  identify new persons quickly.  To achieve real-time\n                  performance on modest computing hardware, we use\n                  random forests for both detection and recognition,\n                  and compare with well-known techniques such as\n                  boosted face detection and support vector machines\n                  for identification. Results are presented on\n                  different datasets and compare favorably well to\n                  competitive methods.",\n}\n
We present a system for detecting and recognizing faces in images in real-time which is able to learn new identities in instants. In mobile service robotics, interaction with persons is becoming increasingly important, real-time performance is required and the introduction of new persons is a necessary feature for many applications. Although face detection and face recognition are well studied, only a few papers address both problems jointly and only few systems are able to learn to identify new persons quickly. To achieve real-time performance on modest computing hardware, we use random forests for both detection and recognition, and compare with well-known techniques such as boosted face detection and support vector machines for identification. Results are presented on different datasets and compare favorably to competitive methods.
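The one-step detection-plus-recognition framing can be sketched as a single multi-class forest where class 0 means "not a face" and the remaining classes are known identities. The sketch below uses scikit-learn's generic random forest on synthetic features, which is only an analogue of the paper's specialized randomized trees; all data and parameters are invented.

import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(0)

# Toy patches: class 0 = background, classes 1..3 = known identities.
# Real inputs would be face-crop features; random blobs stand in here.
def make_patches(label, n, shift):
    return rng.normal(loc=shift, scale=1.0, size=(n, 64)), np.full(n, label)

X, y = zip(*[make_patches(c, 50, 3.0 * c) for c in range(4)])
X, y = np.vstack(X), np.concatenate(y)

# One forest answers both questions: 'is this a face?' (label != 0)
# and 'whose face?' (the label itself). Adding a person = refit with a new class.
forest = RandomForestClassifier(n_estimators=50, random_state=0).fit(X, y)
probe = rng.normal(loc=6.0, scale=1.0, size=(1, 64))
print(forest.predict(probe))  # most likely identity 2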
Approaching a Formal Soccer Theory from Behaviour Specifications in Robotic Soccer.
Dylla, F.; Ferrein, A.; Lakemeyer, G.; Murray, J.; Obst, O.; Röfer, T.; Schiffer, S.; Stolzenburg, F.; Visser, U.; and Wagner, T.
In Dabnicki, P.; and Baca, A., editor(s), Computer Science and Sports, pages 161–185. WIT Press, London, 2008.
@incollection{ dfl+2008approaching,
  author    = {Frank Dylla and Alexander Ferrein and Gerhard Lakemeyer and
               Jan Murray and Oliver Obst and Thomas R{\"o}fer and
               Stefan Schiffer and Frieder Stolzenburg and Ubbo Visser and
               Thomas Wagner},
  title     = {Approaching a Formal Soccer Theory from
               Behaviour Specifications in Robotic Soccer},
  year      = {2008},
  booktitle = {Computer Science and Sports},
  pages     = {161--185},
  editor    = {Peter Dabnicki and Arnold Baca},
  publisher = {WIT Press},
  address   = {London},
  isbn      = {978-1845640644},
  url       = {http://kbsg.rwth-aachen.de/~schiffer/pubs/dfl+2008approaching.pdf},
}
AllemaniACs@Home 2008 Team Description.
Schiffer, S.; and Lakemeyer, G.
In Proceedings CD RoboCup 2008, Suzhou, China, 2008.
@inproceedings{ allemaniacs2008home,\n  title       = {AllemaniACs@Home 2008 Team Description},\n  author      = {Stefan Schiffer and Gerhard Lakemeyer},\n  affiliation = "RWTH Aachen University, Germany",\n  booktitle   = {Proceedings CD RoboCup 2008},\n  address     = {Suzhou, China},\n  year        = {2008},\n  keywords    = {RoboCup, @Home, AllemaniACs, Team Description Paper},\n}\n
2007 (2)
Combining Sound Localization and Laser-based Object Recognition.
Calmes, L.; Wagner, H.; Schiffer, S.; and Lakemeyer, G.
In Tapus, A.; Michalowski, M.; and Sabanovic, S., editor(s), Papers from the AAAI Spring Symposium (AAAI-SS 2007), pages 1–6, Stanford, CA, March 26–28 2007. AAAI Press.
@inproceedings{ Calmes:Wagner:Schiffer:Lakemeyer:2007:AAAI-SS:SouLabOR,\n  author      = "Laurent Calmes and Hermann Wagner and Stefan Schiffer and Gerhard Lakemeyer",\n  affiliation = "RWTH Aachen University, Germany",\n  title       = "Combining Sound Localization and Laser-based Object Recognition",\n  booktitle   = "Papers from the AAAI Spring Symposium (AAAI-SS 2007)",\n  pages       = "1--6",\n  year        = "2007",\n  month       = "March 26-28",\n  location    = "Stanford, California, USA",\n  editor      = "Adriana Tapus and Marek Michalowski and Selma Sabanovic",\n  publisher   = "AAAI Press",\n  address     = "Stanford, CA",\n  isbn        = "978-1-57735-316-4",\n  pdf         = "http://www.aaai.org/Papers/Symposia/Spring/2007/SS-07-07/SS07-07-001.pdf",\n  doi         = "",\n  abstract    = "Mobile robots, in general, and service robots in\n                  human environments, in particular, need to have\n                  versatile abilities to perceive and interact with\n                  their environment. Biologically inspired sound\n                  source localization is an interesting ability for\n                  such a robot. When combined with other sensory input\n                  both the sound localization and the general\n                  interaction abilities can be improved. In\n                  particular, spatial filtering can be used to improve\n                  the signal-to-noise ratio of speech signals\n                  emanating from a given direction in order to enhance\n                  speech recognition abilities. In this paper we\n                  investigate and discuss the combination of sound\n                  source localization and laser-based object\n                  recognition on a mobile robot."\n}\n
Mobile robots, in general, and service robots in human environments, in particular, need to have versatile abilities to perceive and interact with their environment. Biologically inspired sound source localization is an interesting ability for such a robot. When combined with other sensory input both the sound localization and the general interaction abilities can be improved. In particular, spatial filtering can be used to improve the signal-to-noise ratio of speech signals emanating from a given direction in order to enhance speech recognition abilities. In this paper we investigate and discuss the combination of sound source localization and laser-based object recognition on a mobile robot.
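As background for the sound-localization component, here is a sketch of the classic cross-correlation approach to estimating an interaural time difference (ITD) and converting it to an azimuth under a far-field assumption. Microphone spacing, sampling rate, and the sign convention are invented for illustration; this is not the paper's method in detail.

import numpy as np

def itd_azimuth(left, right, fs, mic_distance=0.2, c=343.0):
    """Estimate the ITD by cross-correlation of the two channels and
    convert it to an azimuth estimate (far-field assumption)."""
    corr = np.correlate(left, right, mode="full")
    lag = np.argmax(corr) - (len(right) - 1)  # positive: left channel lags
    itd = lag / fs
    s = np.clip(itd * c / mic_distance, -1.0, 1.0)
    return float(np.degrees(np.arcsin(s)))

fs = 16000
sig = np.random.default_rng(1).normal(size=2000)
delay = 3  # left channel delayed by 3 samples -> source toward the right mic
left, right = sig[:-delay], sig[delay:]
print(round(itd_azimuth(left, right, fs), 1))  # ~ +18.8 degrees here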
AllemaniACs@Home 2007 Team Description.
Schiffer, S.; Ferrein, A.; and Lakemeyer, G.
In Proceedings CD RoboCup 2007, Atlanta, USA, 2007.
@inproceedings{ allemaniacs2007home,\n  title       = {AllemaniACs@Home 2007 Team Description},\n  author      = {Stefan Schiffer and Alexander Ferrein and Gerhard Lakemeyer},\n  affiliation = "RWTH Aachen University, Germany",\n  booktitle   = {Proceedings CD RoboCup 2007},\n  address     = {Atlanta, USA},\n  year        = {2007},\n  keywords    = {RoboCup, @Home, AllemaniACs, Team Description Paper},\n}\n
2006 (3)
Football is coming home.
Schiffer, S.; Ferrein, A.; and Lakemeyer, G.
In Proceedings of the 2006 International Symposium on Practical Cognitive Agents and Robots (PCAR'06), pages 39–50, New York, NY, USA, November 27–28 2006. ACM.
@inproceedings{ Schiffer:Ferrein:Lakemeyer:2006:PCAR:Football,
  author    = {Stefan Schiffer and Alexander Ferrein and Gerhard Lakemeyer},
  title     = {Football is coming home},
  booktitle = {Proceedings of the 2006 International Symposium on Practical
               Cognitive Agents and Robots (PCAR'06)},
  year      = {2006},
  month     = {November 27--28},
  isbn      = {1-74052-130-7},
  pages     = {39--50},
  location  = {Perth, Australia},
  doi       = {10.1145/1232425.1232433},
  publisher = {ACM},
  address   = {New York, NY, USA},
  abstract  = {Most of the robots in the RoboCup soccer league are made
               especially for the task of playing soccer. They use methods
               that are specifically designed for the soccer domain and would
               perhaps fail in other robotic testbeds such as the newly
               established AtHome league without making fundamental changes
               throughout their entire software system. In contrast, our
               robots and the control software were designed with a broader
               field of application in mind. This paper sketches our way from
               the soccer application to the AtHome league.},
  opturl    = {http://agents.csse.uwa.edu.au/pcar/},
}
Most of the robots in the RoboCup soccer league are made especially for the task of playing soccer. They use methods that are specifically designed for the soccer domain and would perhaps fail in other robotic testbeds such as the newly established AtHome league without making fundamental changes throughout their entire software system. In contrast, our robots and the control software were designed with a broader field of application in mind. This paper sketches our way from the soccer application to the AtHome league.
Qualitative World Models for Soccer Robots.
Schiffer, S.; Ferrein, A.; and Lakemeyer, G.
In Wölfl, S.; and Mossakowski, T., editor(s), Qualitative Constraint Calculi, Workshop at KI 2006, Bremen, pages 3–14, 2006.
@inproceedings{ schifferFL06kiqcc,
  author    = {Stefan Schiffer and Alexander Ferrein and Gerhard Lakemeyer},
  title     = {Qualitative World Models for Soccer Robots},
  booktitle = {Qualitative Constraint Calculi, Workshop at KI 2006, Bremen},
  pages     = {3--14},
  year      = {2006},
  editor    = {Stefan W{\"o}lfl and Till Mossakowski},
  isbn      = {3-88722-666-6},
  abstract  = {Until now world models in robotic soccer have been mainly
               quantitative in nature, consisting of fine-grained (numerical)
               estimates of player positions, ball trajectories, and the
               like. In contrast, the concepts used in human soccer are
               largely qualitative. Moving to qualitative world models also
               for robots has the advantage that it drastically reduces the
               space of possible game situations that need to be considered
               and, provided the concepts correspond to those in human soccer
               theory, it eases the task of agent specification for the
               designer. In this paper we propose qualitative representations
               using ideas from spatial cognition and employing Voronoi
               diagrams. We also discuss how reasoning with these
               representations is achieved within our underlying agent
               programming framework.},
}
Until now world models in robotic soccer have been mainly quantitative in nature, consisting of fine-grained (numerical) estimates of player positions, ball trajectories, and the like. In contrast, the concepts used in human soccer are largely qualitative. Moving to qualitative world models also for robots has the advantage that it drastically reduces the space of possible game situations that need to be considered and, provided the concepts correspond to those in human soccer theory, it eases the task of agent specification for the designer. In this paper we propose qualitative representations using ideas from spatial cognition and employing Voronoi diagrams. We also discuss how reasoning with these representations is achieved within our underlying agent programming framework.
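To illustrate the Voronoi ingredient (positions and the derived relation are invented, not the paper's representation): a Voronoi cell around each player is a natural qualitative notion of "controlled space", and shared cell borders give a qualitative neighborhood relation between players.

import numpy as np
from scipy.spatial import Voronoi

# Player positions on a toy pitch (meters).
players = np.array([[10.0, 5.0], [20.0, 15.0], [30.0, 5.0], [25.0, 25.0]])
vor = Voronoi(players)

# Qualitative relation: which players are 'neighbors' (share a cell border)?
neighbors = {tuple(sorted(p)) for p in vor.ridge_points.tolist()}
print(neighbors)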
AllemaniACs@Home 2006 Team Description.
Ferrein, A.; Lakemeyer, G.; and Schiffer, S.
In Proceedings CD RoboCup 2006, Bremen, Germany, 2006. RoboCup Federation.
@inproceedings{ allemaniacs2006home,\n  title       = {AllemaniACs@Home 2006 Team Description},\n  author      = {Alexander Ferrein and Gerhard Lakemeyer and Stefan Schiffer},\n  affiliation = "RWTH Aachen University, Germany",\n  booktitle   = {Proceedings CD RoboCup 2006},\n  address     = {Bremen, Germany},\n  publisher   = {RoboCup Federation},\n  year        = {2006},\n  keywords    = {RoboCup, @Home, AllemaniACs, Team Description Paper},\n}\n
2002 (1)
Collision Avoidance in Real-Time with Look-Ahead (CARLA).
Dylla, F.; Ferrein, A.; Jacobs, S.; Lakemeyer, G.; Richterich, C.; and Schiffer, S.
In Proceedings of the 3rd Workshop on Environment and Motion Modelling (UB 2002), 2002.
@inproceedings{ Dylla:Ferrein:Jacobs:Lakemeyer:Richterich:Schiffer:2002:UMBW:Carla,
  author      = {Frank Dylla and Alexander Ferrein and Stefan Jacobs and
                 Gerhard Lakemeyer and Claus Richterich and Stefan Schiffer},
  affiliation = {RWTH Aachen University},
  title       = {Collision {A}voidance in {R}eal-{T}ime with {L}ook-{A}head ({CARLA})},
  booktitle   = {{Proceedings of the 3rd Workshop on Environment and Motion Modelling (UB 2002)}},
  year        = {2002},
  location    = {Aachen, Germany},
  keywords    = {collision avoidance, robotics, motion planning},
  abstract    = {A heuristic collision avoidance approach for fast robots
                 (over 2.5 m/s) in office domains and robotic soccer.},
}
A heuristic collision avoidance approach for fast robots (over 2.5 m/s) in office domains and robotic soccer.