Publication list generated by bibbase.org.
You can create a new website from this list, or embed it in an existing web page by copying and pasting any of the following snippets.
JavaScript (easiest)

<script src="https://bibbase.org/service/mendeley/f67eca1d-c11e-3ca8-8fb7-763c9c95282d/group/69b6a506-7e92-3ad9-b886-fd05207c084b?jsonp=1"></script>
PHP

<?php
$contents = file_get_contents("https://bibbase.org/service/mendeley/f67eca1d-c11e-3ca8-8fb7-763c9c95282d/group/69b6a506-7e92-3ad9-b886-fd05207c084b?jsonp=1");
print_r($contents);
?>
iFrame (not recommended)

<iframe src="https://bibbase.org/service/mendeley/f67eca1d-c11e-3ca8-8fb7-763c9c95282d/group/69b6a506-7e92-3ad9-b886-fd05207c084b?jsonp=1"></iframe>
For more details, see the documentation.

2022 (6)
Modeling and evaluating beat gestures for social robots.
Zabala, U.; Rodriguez, I.; Martínez-Otzeta, J. M.; and Lazkano, E.
Multimedia Tools and Applications, 81(3): 3421-3438. January 2022.

@article{zabala2022beat,
  title     = {Modeling and evaluating beat gestures for social robots},
  author    = {Zabala, Unai and Rodriguez, Igor and Martínez-Otzeta, José María and Lazkano, Elena},
  journal   = {Multimedia Tools and Applications},
  year      = {2022},
  volume    = {81},
  number    = {3},
  pages     = {3421-3438},
  month     = jan,
  publisher = {Springer},
  doi       = {10.1007/s11042-021-11289-x},
  url       = {https://link.springer.com/article/10.1007/s11042-021-11289-x},
  keywords  = {Fréchet gesture distance, Generative adversarial networks, Motion capturing and imitation, Social robots, Talking movements}
}
Abstract: Natural gestures are a desirable feature for a humanoid robot, as they are presumed to elicit a more comfortable interaction in people. With this aim in mind, we present in this paper a system to develop a natural talking gesture generation behavior. A Generative Adversarial Network (GAN) produces novel beat gestures from the data captured from recordings of human talking. The data is obtained without the need for any kind of wearable, as a motion capture system properly estimates the position of the limbs/joints involved in human expressive talking behavior. After testing in a Pepper robot, it is shown that the system is able to generate natural gestures during large talking periods without becoming repetitive. This approach is computationally more demanding than previous work, therefore a comparison is made in order to evaluate the improvements. This comparison is made by calculating some common measures about the end effectors' trajectories (jerk and path lengths) and complemented by the Fréchet Gesture Distance (FGD) that aims to measure the fidelity of the generated gestures with respect to the provided ones. Results show that the described system is able to learn natural gestures just by observation and improves the one developed with a simpler motion capture system. The quantitative results are sustained by questionnaire-based human evaluation.
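The trajectory measures mentioned in this abstract (jerk and path length of the end effectors) are easy to state concretely. Below is a minimal NumPy sketch under our own assumptions about sampling and array shapes; the function names are illustrative and not taken from the paper.

import numpy as np

def path_length(traj):
    """Total distance traveled by an end effector.

    traj: (T, 3) array of positions sampled at a fixed rate.
    """
    return np.linalg.norm(np.diff(traj, axis=0), axis=1).sum()

def mean_squared_jerk(traj, dt):
    """Mean squared jerk: the third time derivative of position,
    approximated here by third-order finite differences."""
    jerk = np.diff(traj, n=3, axis=0) / dt ** 3
    return (jerk ** 2).sum(axis=1).mean()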
Towards an automatic generation of natural gestures for a storyteller robot.
Zabala, U.; Rodriguez, I.; and Lazkano, E.
In 2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pages 1209-1215. August 2022.

@inproceedings{zabala2022storyteller,
  title     = {Towards an automatic generation of natural gestures for a storyteller robot},
  author    = {Zabala, Unai and Rodriguez, Igor and Lazkano, Elena},
  booktitle = {2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
  year      = {2022},
  pages     = {1209-1215},
  month     = aug,
  publisher = {IEEE},
  doi       = {10.1109/RO-MAN53752.2022.9900532},
  url       = {https://ieeexplore.ieee.org/document/9900532/}
}
Towards an Interpretable Spanish Sign Language Recognizer.
Rodríguez-Moreno, I.; Martínez-Otzeta, J. M.; Goienetxea, I.; and Sierra, B.
Pages 622-629, Scitepress. February 2022.

@article{rodriguezmoreno2022interpretable,
  title     = {Towards an Interpretable Spanish Sign Language Recognizer},
  author    = {Rodríguez-Moreno, Itsaso and Martínez-Otzeta, José María and Goienetxea, Izaro and Sierra, Basilio},
  year      = {2022},
  pages     = {622-629},
  month     = feb,
  publisher = {Scitepress},
  doi       = {10.5220/0010870700003122}
}
An Open-source Library for Processing of 3D Data from Indoor Scenes.
Martínez-Otzeta, J. M.; Mendialdua, I.; Rodríguez-Moreno, I.; Rodriguez, I.; and Sierra, B.
Pages 610-615, Scitepress. February 2022.

@article{martinezotzeta2022library,
  title     = {An Open-source Library for Processing of 3D Data from Indoor Scenes},
  author    = {Martínez-Otzeta, José María and Mendialdua, Iñigo and Rodríguez-Moreno, Itsaso and Rodriguez, Igor and Sierra, Basilio},
  year      = {2022},
  pages     = {610-615},
  month     = feb,
  publisher = {Scitepress},
  doi       = {10.5220/0010870100003122}
}
Sign language recognition by means of common spatial patterns: An analysis.
Rodríguez-Moreno, I.; Martínez-Otzeta, J. M.; Goienetxea, I.; and Sierra, B.
PLOS ONE, 17(10): e0276941. October 2022.

@article{rodriguezmoreno2022analysis,
  title     = {Sign language recognition by means of common spatial patterns: An analysis},
  author    = {Rodríguez-Moreno, Itsaso and Martínez-Otzeta, José María and Goienetxea, Izaro and Sierra, Basilio},
  journal   = {PLOS ONE},
  year      = {2022},
  volume    = {17},
  number    = {10},
  pages     = {e0276941},
  month     = oct,
  publisher = {Public Library of Science},
  doi       = {10.1371/journal.pone.0276941},
  url       = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0276941},
  keywords  = {Algorithms, Facial expressions, Hands, Sign language, Signal filtering, Skeletal joints, Support vector machines, Word recognition}
}
Abstract: Currently there are around 466 million hard of hearing people and this amount is expected to grow in the coming years. Despite the efforts that have been made, there is a communication barrier between deaf and hard of hearing signers and non-signers in environments without an interpreter. Different approaches have been developed lately to try to deal with this issue. In this work, we present an Argentinian Sign Language (LSA) recognition system which uses hand landmarks extracted from videos of the LSA64 dataset in order to distinguish between different signs. Different features are extracted from the signals created with the hand landmarks values, which are first transformed by the Common Spatial Patterns (CSP) algorithm. CSP is a dimensionality reduction algorithm and it has been widely used for EEG systems. The features extracted from the transformed signals have then been used to feed different classifiers, such as Random Forest (RF), K-Nearest Neighbors (KNN) or Multilayer Perceptron (MLP). Several experiments have been performed from which promising results have been obtained, achieving accuracy values between 0.90 and 0.95 on a set of 42 signs.
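Several entries in this list rely on Common Spatial Patterns, so a compact sketch may help: CSP finds spatial filters that maximize the variance of one class while minimizing it for the other, via a generalized eigendecomposition of the per-class covariance matrices. This is a generic textbook formulation under our own naming and shape conventions, not code from any of the papers.

import numpy as np
from scipy.linalg import eigh

def csp_filters(X1, X2, n_filters=4):
    """Common Spatial Patterns for a two-class problem.

    X1, X2: lists of (channels, samples) trials, one list per class.
    Returns a (n_filters, channels) projection matrix whose first/last
    rows favor the variance of one class over the other.
    """
    C1 = np.mean([np.cov(x) for x in X1], axis=0)
    C2 = np.mean([np.cov(x) for x in X2], axis=0)
    # Generalized eigenvalue problem: C1 w = lambda (C1 + C2) w
    vals, vecs = eigh(C1, C1 + C2)
    order = np.argsort(vals)
    picks = np.r_[order[:n_filters // 2], order[-n_filters // 2:]]
    return vecs[:, picks].T

def log_variance_features(W, trial):
    """Classic CSP features: log of normalized variance per filtered signal."""
    Z = W @ trial
    var = Z.var(axis=1)
    return np.log(var / var.sum())

In the sign-language setting above, each "channel" would be one hand-landmark coordinate signal over the duration of the video; the resulting log-variance features feed the RF/KNN/MLP classifiers.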
A Hierarchical Approach for Spanish Sign Language Recognition: From Weak Classification to Robust Recognition System.
Rodríguez-Moreno, I.; Martínez-Otzeta, J. M.; and Sierra, B.
Lecture Notes in Networks and Systems, 542 LNNS: 37-53. 2022.

@article{rodriguezmoreno2022hierarchical,
  title     = {A Hierarchical Approach for Spanish Sign Language Recognition: From Weak Classification to Robust Recognition System},
  author    = {Rodríguez-Moreno, Itsaso and Martínez-Otzeta, José María and Sierra, Basilio},
  journal   = {Lecture Notes in Networks and Systems},
  year      = {2022},
  volume    = {542 LNNS},
  pages     = {37-53},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  doi       = {10.1007/978-3-031-16072-1_3},
  url       = {https://link.springer.com/chapter/10.1007/978-3-031-16072-1_3},
  keywords  = {Hidden Markov Model, Sign language recognition, Spanish sign language}
}
Abstract: Approximately 5% of the world's population has hearing impairments and this number is expected to grow in the coming years due to demographic aging and the amount of noise we are exposed to. A significant fraction of this population has to endure severe impairments even since their childhood and sign languages are an effective means of overcoming this barrier. Although sign languages are quite widespread among the deaf community, there are still situations in which the interaction with hearing people is difficult. This paper presents the sign language recognition module from an ongoing effort to develop a real-time Spanish sign language recognition system that could also work as a tutor. The proposed approach focuses on the definitions of the signs, first performing the classification of their constituents to end up recognizing full signs. Although the performance of the classification of the constituents can be quite weak, good user-independent sign recognition results are obtained.
2021 (8)
Can a Social Robot Learn to Gesticulate Just by Observing Humans?
Zabala, U.; Rodriguez, I.; Martínez-Otzeta, J. M.; and Lazkano, E.
In Advances in Intelligent Systems and Computing, volume 1285, pages 137-150. 2021.

@inproceedings{zabala2021observing,
  title     = {Can a Social Robot Learn to Gesticulate Just by Observing Humans?},
  author    = {Zabala, Unai and Rodriguez, Igor and Martínez-Otzeta, José María and Lazkano, Elena},
  booktitle = {Advances in Intelligent Systems and Computing},
  year      = {2021},
  volume    = {1285},
  pages     = {137-150},
  doi       = {10.1007/978-3-030-62579-5_10},
  keywords  = {Generative Adversarial Networks, Motion capturing and imitation, Social robots, Talking movements}
}
Abstract: The goal of the system presented in this paper is to develop a natural talking gesture generation behavior for a humanoid robot. With that aim, human talking gestures are recorded by a human pose detector and the motion data captured is afterwards used to feed a Generative Adversarial Network (GAN). The motion capture system is capable of properly estimating the limbs/joints involved in human expressive talking behavior without any kind of wearable. Tested in a Pepper robot, the developed system is able to generate natural gestures without becoming repetitive in large talking periods. The approach is compared with a previous work, in order to evaluate the improvements introduced by a computationally more demanding approach. This comparison is made by calculating the end effectors' trajectories in terms of jerk and path lengths. Results show that the described system is able to learn natural gestures just by observation.
Quantitative analysis of robot gesticulation behavior.
Zabala, U.; Rodriguez, I.; Martínez-Otzeta, J. M.; Irigoien, I.; and Lazkano, E.
Autonomous Robots. 2021.

@article{zabala2021quantitative,
  title   = {Quantitative analysis of robot gesticulation behavior},
  author  = {Zabala, Unai and Rodriguez, Igor and Martínez-Otzeta, José María and Irigoien, Itziar and Lazkano, Elena},
  journal = {Autonomous Robots},
  year    = {2021},
  doi     = {10.1007/s10514-020-09958-1}
}
Abstract: Social robot capabilities, such as talking gestures, are best produced using data driven approaches to avoid being repetitive and to show trustworthiness. However, there is a lack of robust quantitative methods that allow to compare such methods beyond visual evaluation. In this paper a quantitative analysis is performed that compares two Generative Adversarial Networks based gesture generation approaches. The aim is to measure characteristics such as fidelity to the original training data, but at the same time keep track of the degree of originality of the produced gestures. Principal Coordinate Analysis and Procrustes statistics are performed and a new Fréchet Gesture Distance is proposed by adapting the Fréchet Inception Distance to gestures. These three techniques are taken together to assess the fidelity/originality of the generated gestures.
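The Fréchet Gesture Distance mentioned here adapts the Fréchet Inception Distance: both fit a Gaussian to each set of feature vectors and compute the Fréchet distance between the two Gaussians. A minimal sketch follows, assuming gesture features have already been extracted as fixed-length vectors; the feature extractor itself (the paper's actual adaptation) is not reproduced.

import numpy as np
from scipy.linalg import sqrtm

def frechet_distance(feat_a, feat_b):
    """Fréchet distance between Gaussian fits of two feature sets.

    feat_a, feat_b: (N, D) arrays of feature vectors. Same formula as
    the Fréchet Inception Distance:
    ||mu_a - mu_b||^2 + Tr(Sa + Sb - 2 (Sa Sb)^(1/2)).
    """
    mu_a, mu_b = feat_a.mean(0), feat_b.mean(0)
    cov_a = np.cov(feat_a, rowvar=False)
    cov_b = np.cov(feat_b, rowvar=False)
    covmean = sqrtm(cov_a @ cov_b)
    if np.iscomplexobj(covmean):
        # numerical noise can introduce tiny imaginary parts
        covmean = covmean.real
    diff = mu_a - mu_b
    return diff @ diff + np.trace(cov_a + cov_b - 2.0 * covmean)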
Expressing Robot Personality through Talking Body Language.
Zabala, U.; Rodriguez, I.; Martínez-Otzeta, J. M.; and Lazkano, E.
Applied Sciences, 11(10): 4639. 2021.

@article{zabala2021personality,
  title     = {Expressing Robot Personality through Talking Body Language},
  author    = {Zabala, Unai and Rodriguez, Igor and Martínez-Otzeta, José María and Lazkano, Elena},
  journal   = {Applied Sciences},
  year      = {2021},
  volume    = {11},
  number    = {10},
  pages     = {4639},
  publisher = {Multidisciplinary Digital Publishing Institute},
  doi       = {10.3390/app11104639},
  url       = {https://www.mdpi.com/2076-3417/11/10/4639},
  keywords  = {emotion appraisal, robot personality, social robots, talking gesticulation behavior}
}
Abstract: Social robots must master the nuances of human communication as a means to convey an effective message and generate trust. It is well-known that non-verbal cues are very important in human interactions, and therefore a social robot should produce a body language coherent with its discourse. In this work, we report on a system that endows a humanoid robot with the ability to adapt its body language according to the sentiment of its speech. A combination of talking beat gestures with emotional cues such as eye lightings, body posture, voice intonation and volume permits a rich variety of behaviors. The developed approach is not purely reactive, and it easily allows to assign a kind of personality to the robot. We present several videos with the robot in two different scenarios, showing discrete and histrionic personalities.
Which gesture generator performs better?
Zabala, U.; Rodriguez, I.; Martínez-Otzeta, J. M.; Irigoien, I.; and Lazkano, E.
Proceedings - IEEE International Conference on Robotics and Automation, 2021-May: 3345-3352. 2021.

@inproceedings{zabala2021which,
  title     = {Which gesture generator performs better?},
  author    = {Zabala, Unai and Rodriguez, Igor and Martínez-Otzeta, José María and Irigoien, Itziar and Lazkano, Elena},
  booktitle = {Proceedings - IEEE International Conference on Robotics and Automation},
  year      = {2021},
  volume    = {2021-May},
  pages     = {3345-3352},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  doi       = {10.1109/ICRA48506.2021.9561075}
}
Abstract: Talking gestures are a fundamental part of body language and, therefore, are also important for social robots. Gesture generation by generative approaches is supposed to produce a more appropriate behavior than rule-based approaches. Usually, the evaluation of generated gestures is carried out by subjective visual evaluation, which could be culturally dependent and influenced by external factors. In this work we extend previous research on quantitative evaluation methods, comparing two generative methods and showing that their results correlate with subjective evaluation by a sizable group of people. The final goal is to offer a quantitative tool to help researchers automate the evaluation of their gesture generation systems, as a complementary measure to subjective methods.
Sign Language Recognition by Means of Common Spatial Patterns.
Rodríguez-Moreno, I.; Martínez-Otzeta, J. M.; Goienetxea, I.; and Sierra, B.
ACM International Conference Proceeding Series, 96-102. January 2021.

@inproceedings{rodriguezmoreno2021csp,
  title     = {Sign Language Recognition by Means of Common Spatial Patterns},
  author    = {Rodríguez-Moreno, Itsaso and Martínez-Otzeta, José María and Goienetxea, Izaro and Sierra, Basilio},
  booktitle = {ACM International Conference Proceeding Series},
  year      = {2021},
  pages     = {96-102},
  month     = jan,
  publisher = {Association for Computing Machinery},
  doi       = {10.1145/3453800.3453818},
  url       = {https://doi.org/10.1145/3453800.3453818},
  keywords  = {Common Spatial Patterns, Computer Vision, Sign Language Recognition}
}
Abstract: Currently, and despite the efforts that have been made, people with hearing impairments often have difficulties to use applications that have been designed for people who can hear, or simply to communicate with their environment. In this work, we present an Argentinian Sign Language (LSA) recognition system which distinguishes between different signs using hand landmarks extracted from the videos of the dataset. The Common Spatial Patterns (CSP) algorithm is used to extract features, and the classification is performed with multiple classifiers. Different experiments have been made from which promising results have been obtained.
A New Approach for Video Action Recognition: CSP-Based Filtering for Video to Image Transformation.
Rodríguez-Moreno, I.; Martínez-Otzeta, J. M.; Goienetxea, I.; Rodriguez, I.; and Sierra, B.
IEEE Access, 9: 139946-139957. 2021.

@article{rodriguezmoreno2021video,
  title     = {A New Approach for Video Action Recognition: CSP-Based Filtering for Video to Image Transformation},
  author    = {Rodríguez-Moreno, Itsaso and Martínez-Otzeta, José María and Goienetxea, Izaro and Rodriguez, Igor and Sierra, Basilio},
  journal   = {IEEE Access},
  year      = {2021},
  volume    = {9},
  pages     = {139946-139957},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  doi       = {10.1109/ACCESS.2021.3118829},
  keywords  = {Action recognition, common spatial patterns, global visual descriptors, sign language recognition, social robotics}
}
Abstract: In this paper we report on the design of a pipeline involving Common Spatial Patterns (CSP), a signal processing approach commonly used in the field of electroencephalography (EEG), matrix representation of features and image classification to categorize videos taken by a humanoid robot. The ultimate goal is to endow the robot with action recognition capabilities for a more natural social interaction. Summarizing, we apply the CSP algorithm to a set of signals obtained for each video by extracting skeleton joints of the person performing the action. From the transformed signals a summary image is obtained for each video, and these images are then classified using two different approaches: global visual descriptors and convolutional neural networks. The presented approach has been tested on two data sets that represent two scenarios with common characteristics. The first one is a data set with 46 individuals performing 6 different actions. In order to create the group of signals of each video, OpenPose has been used to extract the skeleton joints of the person performing the actions. The second data set is an Argentinian Sign Language data set (LSA64) from which the signs performed using just the right hand have been used. In this case the joint signals have been obtained using MediaPipe. The results obtained with the presented method have been compared with a Long Short-Term Memory (LSTM) method, achieving promising results.
Problems selection under dynamic selection of the best base classifier in one versus one: PSEUDOVO.
Goienetxea, I.; Mendialdua, I.; Rodríguez, I.; and Sierra, B.
International Journal of Machine Learning and Cybernetics, 1-15. January 2021.

@article{goienetxea2021pseudovo,
  title     = {Problems selection under dynamic selection of the best base classifier in one versus one: PSEUDOVO},
  author    = {Goienetxea, Izaro and Mendialdua, Iñigo and Rodríguez, Igor and Sierra, Basilio},
  journal   = {International Journal of Machine Learning and Cybernetics},
  year      = {2021},
  pages     = {1-15},
  month     = jan,
  publisher = {Springer Science and Business Media Deutschland GmbH},
  doi       = {10.1007/s13042-020-01270-9},
  url       = {https://doi.org/10.1007/s13042-020-01270-9},
  keywords  = {Decomposition strategies, Dynamic Classifier Selection, One versus one, Supervised classification}
}
Abstract: Class binarization techniques are used to decompose multi-class problems into several easier-to-solve binary sub-problems. One of the most popular binarization techniques is One versus One (OVO), which creates a sub-problem for each pair of classes of the original problem. Different versions of OVO have been developed to try to solve some of its problems, such as DYNOVO, which dynamically tries to select the best classifier for each sub-problem. In this paper, a new extension that has been made for DYNOVO, called PSEUDOVO, is presented. This extension also tries to avoid the non-competent sub-problems. An empirical study has been carried out over several UCI data sets, as well as a new data set of musical pieces of well-known classical composers. Promising results have been obtained, from which it can be concluded that the PSEUDOVO extension improves the performance of DYNOVO.
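For readers unfamiliar with the One versus One scheme that DYNOVO and PSEUDOVO build on, here is a bare-bones sketch: one binary classifier per pair of classes, combined by majority voting. DYNOVO's dynamic per-pair classifier selection and PSEUDOVO's pruning of non-competent sub-problems are deliberately left out; the class name and interface are ours.

from itertools import combinations
import numpy as np

class OneVsOne:
    """Minimal one-versus-one decomposition with majority voting.

    make_clf: factory returning a fresh binary classifier with
    fit/predict methods (e.g. a scikit-learn estimator). Labels are
    assumed to be small non-negative integers.
    """
    def __init__(self, make_clf):
        self.make_clf = make_clf
        self.pairs = {}

    def fit(self, X, y):
        # One binary sub-problem per unordered pair of classes.
        for a, b in combinations(np.unique(y), 2):
            mask = (y == a) | (y == b)
            clf = self.make_clf()
            clf.fit(X[mask], y[mask])
            self.pairs[(a, b)] = clf
        return self

    def predict(self, X):
        # Each pairwise classifier casts one vote per sample.
        votes = np.stack([clf.predict(X) for clf in self.pairs.values()])
        return np.array([np.bincount(col.astype(int)).argmax()
                         for col in votes.T])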
Small variation in dynamic functional connectivity in cerebellar networks.
Fernandez-Iriondo, I.; Jimenez-Marin, A.; Diez, I.; Bonifazi, P.; Swinnen, S. P.; Muñoz, M. A.; and Cortes, J. M.
Neurocomputing. May 2021.

@article{fernandeziriondo2021small,
  title     = {Small variation in dynamic functional connectivity in cerebellar networks},
  author    = {Fernandez-Iriondo, Izaro and Jimenez-Marin, Antonio and Diez, Ibai and Bonifazi, Paolo and Swinnen, Stephan P. and Muñoz, Miguel A. and Cortes, Jesus M.},
  journal   = {Neurocomputing},
  year      = {2021},
  month     = may,
  publisher = {Elsevier BV},
  doi       = {10.1016/j.neucom.2020.09.092}
}
Abstract: Brain networks can be defined and explored through their connectivity. Here, we analyzed the relationship between structural connectivity (SC) across 2,514 regions that cover the entire brain and brainstem, and their dynamic functional connectivity (DFC). To do so, we focused on a combination of two metrics: the first assesses the degree of SC-DFC similarity, i.e., the extent to which the dynamic functional correlations can be explained by structural pathways; and the second is the intrinsic variability of the DFC networks over time. Overall, we found that cerebellar networks have a smaller DFC variability than other networks in the brain. Moreover, the internal structure of the cerebellum could be clearly divided in two distinct posterior and anterior parts, the latter also connected to the brainstem. The mechanism to maintain small variability of the DFC in the posterior part of the cerebellum is consistent with another of our findings, namely, that this structure exhibits the highest SC-DFC similarity relative to the other networks studied, i.e. structure constrains the variation in dynamics. By contrast, the anterior part of the cerebellum also exhibits small DFC variability but it has the lowest SC-DFC similarity, suggesting a different mechanism is at play. Because this structure connects to the brainstem, which regulates sleep cycles, cardiac and respiratory functioning, we suggest that such critical functionality drives the low variability in the DFC. Overall, the low variability detected in DFC expands our current knowledge of cerebellar networks, which are extremely rich and complex, participating in a wide range of cognitive functions, from movement control and coordination to executive function or emotional regulation. Moreover, the association between such low variability and structure suggests that differentiated computational principles can be applied in the cerebellum as opposed to other structures, such as the cerebral cortex.
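A sliding-window construction is the usual way to obtain the dynamic functional connectivity referred to here. The sketch below shows that generic construction with arbitrary window parameters; the paper's actual windowing and variability metric may differ.

import numpy as np

def dynamic_fc(ts, win=30, step=5):
    """Sliding-window dynamic functional connectivity.

    ts: (T, R) array of time series for R regions.
    Returns an (n_windows, R, R) stack of correlation matrices.
    """
    mats = [np.corrcoef(ts[s:s + win].T)
            for s in range(0, len(ts) - win + 1, step)]
    return np.stack(mats)

def dfc_variability(mats):
    """One possible variability summary: the mean, over all edges,
    of each edge's standard deviation across windows."""
    iu = np.triu_indices(mats.shape[1], k=1)
    return mats[:, iu[0], iu[1]].std(axis=0).mean()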
2020 (3)
Shedding Light on People Action Recognition in Social Robotics by Means of Common Spatial Patterns.
Rodríguez-Moreno, I.; Martínez-Otzeta, J. M.; Goienetxea, I.; Rodriguez-Rodriguez, I.; and Sierra, B.
Sensors, 20(8): 2436. April 2020.

@article{rodriguezmoreno2020shedding,
  title     = {Shedding Light on People Action Recognition in Social Robotics by Means of Common Spatial Patterns},
  author    = {Rodríguez-Moreno, Itsaso and Martínez-Otzeta, José María and Goienetxea, Izaro and Rodriguez-Rodriguez, Igor and Sierra, Basilio},
  journal   = {Sensors},
  year      = {2020},
  volume    = {20},
  number    = {8},
  pages     = {2436},
  month     = apr,
  publisher = {MDPI AG},
  doi       = {10.3390/s20082436},
  url       = {https://www.mdpi.com/1424-8220/20/8/2436},
  keywords  = {Action recognition, Common spatial patterns, Social robotics}
}
Abstract: Action recognition in robotics is a research field that has gained momentum in recent years. In this work, a video activity recognition method is presented, which has the ultimate goal of endowing a robot with action recognition capabilities for a more natural social interaction. The application of Common Spatial Patterns (CSP), a signal processing approach widely used in electroencephalography (EEG), is presented in a novel manner to be used in activity recognition in videos taken by a humanoid robot. A sequence of skeleton data is considered as a multidimensional signal and filtered according to the CSP algorithm. Then, characteristics extracted from these filtered data are used as features for a classifier. A database with 46 individuals performing six different actions has been created to test the proposed method. The CSP-based method along with a Linear Discriminant Analysis (LDA) classifier has been compared to a Long Short-Term Memory (LSTM) neural network, showing that the former obtains similar or better results than the latter, while being simpler.
Using Common Spatial Patterns to Select Relevant Pixels for Video Activity Recognition.
Rodríguez-Moreno, I.; Martínez-Otzeta, J. M.; Sierra, B.; Irigoien, I.; Rodriguez-Rodriguez, I.; and Goienetxea, I.
Applied Sciences, 10(22): 8075. November 2020.

@article{rodriguezmoreno2020pixels,
  title     = {Using Common Spatial Patterns to Select Relevant Pixels for Video Activity Recognition},
  author    = {Rodríguez-Moreno, Itsaso and Martínez-Otzeta, José María and Sierra, Basilio and Irigoien, Itziar and Rodriguez-Rodriguez, Igor and Goienetxea, Izaro},
  journal   = {Applied Sciences},
  year      = {2020},
  volume    = {10},
  number    = {22},
  pages     = {8075},
  month     = nov,
  publisher = {MDPI AG},
  doi       = {10.3390/app10228075},
  url       = {https://www.mdpi.com/2076-3417/10/22/8075},
  keywords  = {Common spatial patterns, Histogram of optical flow, Video activity recognition}
}
Abstract: Video activity recognition, despite being an emerging task, has been the subject of important research due to the importance of its everyday applications. Video camera surveillance could benefit greatly from advances in this field. In the area of robotics, the tasks of autonomous navigation or social interaction could also take advantage of the knowledge extracted from live video recording. In this paper, a new approach for video action recognition is presented. The new technique consists of introducing a method, which is usually used in Brain Computer Interface (BCI) for electroencephalography (EEG) systems, and adapting it to this problem. After describing the technique, achieved results are shown and a comparison with another method is carried out to analyze the performance of our new approach.
Personal guides: Heterogeneous robots sharing personal tours in multi-floor environments.
Rodriguez, I.; Zabala, U.; Marín-Reyes, P. A.; Jauregi, E.; Lorenzo-Navarro, J.; Lazkano, E.; and Castrillón-Santana, M.
Sensors (Switzerland). 2020.

@article{rodriguez2020gidabot,
  title    = {Personal guides: Heterogeneous robots sharing personal tours in multi-floor environments},
  author   = {Rodriguez, Igor and Zabala, Unai and Marín-Reyes, Pedro A. and Jauregi, Ekaitz and Lorenzo-Navarro, Javier and Lazkano, Elena and Castrillón-Santana, Modesto},
  journal  = {Sensors (Switzerland)},
  year     = {2020},
  doi      = {10.3390/s20092480},
  keywords = {Distributed robotic system, Face re-identification, Neural networks, Social service robots}
}
Abstract: GidaBot is an application designed to set up and run a heterogeneous team of robots to act as tour guides in multi-floor buildings. Although the tours can go through several floors, the robots can only service a single floor, and thus, a guiding task may require collaboration among several robots. The designed system makes use of a robust inter-robot communication strategy to share goals and paths during the guiding tasks. Such tours work as personal services carried out by one or more robots. In this paper, a face re-identification/verification module based on state-of-the-art techniques is developed, evaluated offline, and integrated into GidaBot's real daily activities, to avoid new visitors interfering with those attended. It is a complex problem because, as users are casual visitors, no long-term information is stored, and consequently, faces are unknown in the training step. Initially, re-identification and verification are evaluated offline considering different face detectors and computing distances in a face embedding representation. To fulfil the goal online, several face detectors are fused in parallel to avoid face alignment bias produced by face detectors under certain circumstances, and the decision is made based on a minimum distance criterion. This fused approach outperforms any individual method and highly improves the real system's reliability, as the tests carried out using real robots at the Faculty of Informatics in San Sebastian show.
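The minimum-distance decision this abstract describes reduces, at its core, to a nearest-neighbor check in the face-embedding space. A toy sketch follows, with an arbitrary threshold and our own function name; the detector fusion and the embedding network themselves are not shown.

import numpy as np

def verify(query_emb, gallery_embs, threshold=0.8):
    """Minimum-distance verification over face embeddings.

    query_emb: (D,) embedding of the detected face.
    gallery_embs: (N, D) embeddings of the visitors currently attended.
    Returns the index of the matched identity, or None when the closest
    gallery face is farther than the threshold (i.e., a new visitor).
    """
    dists = np.linalg.norm(gallery_embs - query_emb, axis=1)
    best = int(dists.argmin())
    return best if dists[best] <= threshold else None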

2019 (5)

Learning to Gesticulate by Observation Using a Deep Generative Approach.
Zabala, U.; Rodriguez, I.; Martínez-Otzeta, J. M.; and Lazkano, E.
In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 2019.
@inproceedings{zabala2019learning,
  title = {Learning to Gesticulate by Observation Using a Deep Generative Approach},
  year = {2019},
  keywords = {Generative Adversarial Networks,Motion capturing and imitation,Social robots,Talking movements},
  author = {Zabala, Unai and Rodriguez, Igor and Martínez-Otzeta, José María and Lazkano, Elena},
  doi = {10.1007/978-3-030-35888-4_62},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}
}
The goal of the system presented in this paper is to develop a natural talking gesture generation behavior for a humanoid robot, by feeding a Generative Adversarial Network (GAN) with human talking gestures recorded by a Kinect. A direct kinematic approach is used to translate from human poses to robot joint positions. The provided videos show that the robot is able to use a wide variety of gestures, offering a non-dreary, natural expression level.
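
As a rough illustration of the approach in the abstract, the sketch below trains a toy GAN over fixed-length pose vectors. The network sizes, the pose dimensionality and the training details are illustrative assumptions, not the authors' architecture.

```python
import torch
import torch.nn as nn

POSE_DIM, NOISE_DIM = 36, 16  # e.g. 12 joints x 3 coordinates (assumed)

G = nn.Sequential(nn.Linear(NOISE_DIM, 64), nn.ReLU(), nn.Linear(64, POSE_DIM))
D = nn.Sequential(nn.Linear(POSE_DIM, 64), nn.LeakyReLU(0.2), nn.Linear(64, 1))

opt_g = torch.optim.Adam(G.parameters(), lr=2e-4)
opt_d = torch.optim.Adam(D.parameters(), lr=2e-4)
bce = nn.BCEWithLogitsLoss()

def train_step(real_poses):                       # real_poses: (batch, POSE_DIM)
    batch = real_poses.size(0)
    fake = G(torch.randn(batch, NOISE_DIM))
    # Discriminator: push real poses towards 1, generated poses towards 0
    loss_d = bce(D(real_poses), torch.ones(batch, 1)) + \
             bce(D(fake.detach()), torch.zeros(batch, 1))
    opt_d.zero_grad(); loss_d.backward(); opt_d.step()
    # Generator: try to make the discriminator label its output as real
    loss_g = bce(D(fake), torch.ones(batch, 1))
    opt_g.zero_grad(); loss_g.backward(); opt_g.step()

train_step(torch.randn(8, POSE_DIM))  # one step on stand-in "captured" poses
```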

Video Activity Recognition: State-of-the-Art.
Rodríguez-Moreno, I.; Martínez-Otzeta, J. M.; Sierra, B.; Rodriguez, I.; and Jauregi, E.
Sensors, 19(14): 3160. July 2019.
@article{rodriguezmoreno2019video,
  title = {Video Activity Recognition: State-of-the-Art},
  year = {2019},
  keywords = {Activity recognition,Computer vision,Deep learning,Optical flow},
  pages = {3160},
  volume = {19},
  number = {14},
  websites = {https://www.mdpi.com/1424-8220/19/14/3160},
  month = {7},
  publisher = {MDPI AG},
  author = {Rodríguez-Moreno, Itsaso and Martínez-Otzeta, José María and Sierra, Basilio and Rodriguez, Igor and Jauregi, Ekaitz},
  doi = {10.3390/s19143160},
  journal = {Sensors}
}
Video activity recognition, although an emerging task, has been the subject of considerable research effort due to the importance of its everyday applications. Surveillance by video cameras could benefit greatly from advances in this field. In the area of robotics, the tasks of autonomous navigation or social interaction could also take advantage of the knowledge extracted from live video recording. The aim of this paper is to survey the state-of-the-art techniques for video activity recognition while at the same time mentioning other techniques used for the same task that the research community has known for several years. For each of the analyzed methods, its contribution over previous works and the performance of the proposed approach are discussed.

Spontaneous talking gestures using Generative Adversarial Networks.
Rodriguez, I.; Martínez-Otzeta, J. M.; Irigoien, I.; and Lazkano, E.
Robotics and Autonomous Systems, 114: 57-65. April 2019.
@article{rodriguez2019spontaneous,
  title = {Spontaneous talking gestures using Generative Adversarial Networks},
  year = {2019},
  keywords = {Body language expression,Generative adversarial networks,Generative learning models,Motion generation,Principal coordinate analysis,Social robotics},
  pages = {57-65},
  volume = {114},
  month = {4},
  publisher = {North-Holland},
  author = {Rodriguez, Igor and Martínez-Otzeta, José María and Irigoien, Itziar and Lazkano, Elena},
  doi = {10.1016/j.robot.2018.11.024},
  journal = {Robotics and Autonomous Systems}
}
This paper presents a talking gesture generation system based on Generative Adversarial Networks, along with an evaluation of its adequateness. The talking gesture generation system produces a sequence of joint positions of the robot's upper body which keeps in step with an uttered sentence. The suitability of the approach is demonstrated with a real robot. Besides, the motion generation method is compared with other (non-deep) generative approaches. A two-step comparison is made. On the one hand, a statistical analysis is performed over movements generated by each approach by means of Principal Coordinate Analysis. On the other hand, the robot motion adequateness is measured by calculating the end effectors' jerk, path lengths and 3D space coverage.
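
The two trajectory measures mentioned above are easy to state in code. The sketch below computes the path length and a mean jerk for one end effector sampled at a fixed rate; the sampling period and the finite-difference scheme are assumptions of the example.

```python
import numpy as np

def path_length(traj):
    """traj: (T, 3) array of end-effector positions."""
    return np.sum(np.linalg.norm(np.diff(traj, axis=0), axis=1))

def mean_jerk(traj, dt=0.05):
    """Jerk is the third time derivative of position, here approximated
    with third-order finite differences."""
    jerk = np.diff(traj, n=3, axis=0) / dt**3
    return np.mean(np.linalg.norm(jerk, axis=1))

traj = np.cumsum(np.random.rand(100, 3) * 0.01, axis=0)  # stand-in trajectory
print(path_length(traj), mean_jerk(traj))
```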

Talking with Sentiment: Adaptive Expression Generation Behavior for Social Robots.
Rodriguez, I.; Manfré, A.; Vella, F.; Infantino, I.; and Lazkano, E.
In Advances in Intelligent Systems and Computing, volume 855, pages 209-223, November 2019. Springer Verlag.
@inproceedings{rodriguez2019talking,
  title = {Talking with Sentiment: Adaptive Expression Generation Behavior for Social Robots},
  year = {2019},
  keywords = {Cognitive robotics,Generative adversarial network,Human-robot interaction,Social robotics},
  pages = {209-223},
  volume = {855},
  websites = {https://doi.org/10.1007/978-3-319-99885-5_15},
  month = {11},
  publisher = {Springer Verlag},
  author = {Rodriguez, Igor and Manfré, Adriano and Vella, Filippo and Infantino, Ignazio and Lazkano, Elena},
  doi = {10.1007/978-3-319-99885-5_15},
  booktitle = {Advances in Intelligent Systems and Computing}
}
This paper presents a neural-based approach for generating natural gesticulation movements for a humanoid robot, enriched with other relevant social signals depending on sentiment processing. In particular, we take into account some simple head postures, voice parameters, and eye colors as expressiveness-enhancing elements. A Generative Adversarial Network (GAN) allows the proposed system to extend the variability of basic gesticulation movements while avoiding repetitive and monotonous behavior. Using sentiment analysis on the text that will be pronounced by the robot, we derive a value for emotion valence and coherently choose suitable parameters for the expressive elements. In this way, the robot has an adaptive expression generation during talking. Experiments validate the proposed approach by analyzing the contribution of all the factors to the perceived naturalness of the robot behavior.
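
A minimal sketch of the idea, assuming a valence score in [-1, 1] and linear interpolation: the specific channels, ranges and colors below are invented for illustration and are not the parameters used in the paper.

```python
def expression_params(valence):
    """Map sentiment valence in [-1, 1] to expressive channel settings."""
    t = (valence + 1.0) / 2.0            # normalise to [0, 1]
    return {
        "head_pitch_deg": -15 + 30 * t,  # drooped head when sad, raised when happy
        "voice_pitch_hz": 90 + 60 * t,   # flatter voice for negative valence
        "eye_rgb": (int(255 * t), int(255 * t), int(255 * (1 - t))),  # blue -> yellow
    }

print(expression_params(-0.8))  # markedly negative sentence
print(expression_params(0.9))   # markedly positive sentence
```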

GidaBot: a system of heterogeneous robots collaborating as guides in multi-floor environments.
Parra, O.; Rodriguez, I.; Jauregi, E.; Lazkano, E.; and Ruiz, T.
Intelligent Service Robotics, 12(4): 319-332. October 2019.
@article{parra2019gidabot,
  title = {GidaBot: a system of heterogeneous robots collaborating as guides in multi-floor environments},
  year = {2019},
  keywords = {Distributed robotic system,Service robots,Tour-guide robots},
  pages = {319-332},
  volume = {12},
  number = {4},
  websites = {https://doi.org/10.1007/s11370-019-00285-8},
  month = {10},
  publisher = {Springer Verlag},
  author = {Parra, Oihane and Rodriguez, Igor and Jauregi, Ekaitz and Lazkano, Elena and Ruiz, Txelo},
  doi = {10.1007/s11370-019-00285-8},
  journal = {Intelligent Service Robotics}
}
GidaBot is an application designed to set up and run a heterogeneous team of robots to act as tour guides in multi-floor buildings. Although the tours can go through several floors, robots are not allowed to use the lift, and thus each guiding task requires collaboration among several robots, one per floor. The designed system makes use of a robust inter-robot communication strategy to share goals and paths during the guiding tasks. A user-friendly GUI helps untrained users or new visitors to easily choose target locations or define a list of locations to be visited sequentially. A prototype has been implemented using ROS, and the system robustness has been tested in a Gazebo-based simulated robot/environment and using real robots at the Faculty of Informatics in San Sebastian. The whole application is available together with a simulated world so that the system functioning can be checked further.
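
Since each robot services a single floor, a tour has to be cut into per-floor legs handed from robot to robot. The sketch below shows one way to do that segmentation; the data model (a goal as a (floor, room) pair, a registry of one robot per floor) is an assumption of the example, not GidaBot's actual message protocol.

```python
def split_tour(goals, robot_by_floor):
    """goals: ordered list of (floor, room) pairs.
    Returns (robot, rooms) legs in visiting order, so that consecutive
    robots can hand the visitor over at floor changes."""
    legs = []
    for floor, room in goals:
        robot = robot_by_floor[floor]
        if legs and legs[-1][0] == robot:
            legs[-1][1].append(room)      # same floor: extend the current leg
        else:
            legs.append((robot, [room]))  # floor change: new robot takes over
    return legs

# A tour crossing two floors (robot names are placeholders)
print(split_tour([(0, "lab"), (0, "office"), (1, "library")],
                 {0: "robot_floor0", 1: "robot_floor1"}))
```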

2018 (4)

On how self-body awareness improves autonomy in social robots.
Rodriguez, I.; Martinez-Otzeta, J. M.; Lazkano, E.; Ruiz, T.; and Sierra, B.
2017 IEEE International Conference on Robotics and Biomimetics, ROBIO 2017, 2018-January: 1688-1693. March 2018.
@article{rodriguez2018selfbody,
  title = {On how self-body awareness improves autonomy in social robots},
  year = {2018},
  pages = {1688-1693},
  volume = {2018-January},
  month = {3},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  author = {Rodriguez, I. and Martinez-Otzeta, J. M. and Lazkano, E. and Ruiz, T. and Sierra, B.},
  doi = {10.1109/ROBIO.2017.8324661},
  journal = {2017 IEEE International Conference on Robotics and Biomimetics, ROBIO 2017}
}
Just as humans are conscious of their bodies, social robots, on the way to becoming truly autonomous, need to be aware of their body posture. Feasible gestures, movements and actions depend on the current body posture. The work developed in this paper aims to show empirically how self-configuration recognition increases the degree of autonomy of a robot in the context of entertainment robotics. The integration of a classification tree for body posture identification, based on data acquired from the proprioceptive sensors of a NAO robot, allows interaction with the robot in a more flexible and persistent manner. As a result, the robot shows a sounder behavior and a greater degree of autonomy. Moreover, even though the body-awareness capability has been developed for minstrel robots, its application can be generalized to other contexts.
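
A toy version of the classification step, assuming joint angles as features; the feature layout, labels and values are stand-ins, not the data used in the paper.

```python
import numpy as np
from sklearn.tree import DecisionTreeClassifier

# Each row: proprioceptive joint readings in radians (layout assumed),
# each label: the posture the robot was in when they were recorded.
X = np.array([[0.00, 0.05, 0.00],    # upright joints
              [1.50, 1.40, 0.80],    # deeply bent joints
              [0.10, 1.60, 0.00]])   # mixed configuration
y = ["standing", "crouching", "sitting"]

clf = DecisionTreeClassifier().fit(X, y)
print(clf.predict([[0.05, 0.10, 0.02]]))  # near the "standing" exemplar
```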

Robots on stage: A cognitive framework for socially interacting robots.
Rodriguez, I.; Astigarraga, A.; Lazkano, E.; Martínez-Otzeta, J. M.; and Mendialdua, I.
Biologically Inspired Cognitive Architectures, 25: 17-25. August 2018.
@article{rodriguez2018robots,
  title = {Robots on stage: A cognitive framework for socially interacting robots},
  year = {2018},
  keywords = {Affective perception,Cognitive architecture,Social robotics},
  pages = {17-25},
  volume = {25},
  month = {8},
  publisher = {Elsevier},
  author = {Rodriguez, Igor and Astigarraga, Aitzol and Lazkano, Elena and Martínez-Otzeta, José María and Mendialdua, Inigo},
  doi = {10.1016/j.bica.2018.07.014},
  journal = {Biologically Inspired Cognitive Architectures}
}
This article is an attempt to characterize the cognitive skills involved in the development of socially interacting robots. We argue that performative arts, such as oral improvised poetry, can serve as a useful testbed for the development and evaluation of robots that interact with humans. The paper presents a speech-based humanoid poet-performer that can (1) listen to human commands and generate poems on demand; (2) perceive the audience's feedback and react displaying the corresponding emotional response; and (3) generate natural gesticulation movements enriched with social signals depending on sentiment processing. We discuss each of the involved abilities, present working implementations and show how they are combined in an embodied cognitive architecture to achieve the fluent coordination and joint-action timing needed in live events.

Talking with Sentiment: Adaptive Expression Generation Behavior for Social Robots.
Rodriguez, I.; Manfré, A.; Vella, F.; Infantino, I.; and Lazkano, E.
Advances in Intelligent Systems and Computing, 855: 209-223. November 2018.
@article{rodriguez2018talking,
  title = {Talking with Sentiment: Adaptive Expression Generation Behavior for Social Robots},
  year = {2018},
  keywords = {Cognitive robotics,Generative adversarial network,Human-robot interaction,Social robotics},
  pages = {209-223},
  volume = {855},
  websites = {https://link.springer.com/chapter/10.1007/978-3-319-99885-5_15},
  month = {11},
  publisher = {Springer, Cham},
  author = {Rodriguez, Igor and Manfré, Adriano and Vella, Filippo and Infantino, Ignazio and Lazkano, Elena},
  doi = {10.1007/978-3-319-99885-5_15},
  journal = {Advances in Intelligent Systems and Computing}
}
This paper presents a neural-based approach for generating natural gesticulation movements for a humanoid robot, enriched with other relevant social signals depending on sentiment processing. In particular, we take into account some simple head postures, voice parameters, and eye colors as expressiveness-enhancing elements. A Generative Adversarial Network (GAN) allows the proposed system to extend the variability of basic gesticulation movements while avoiding repetitive and monotonous behavior. Using sentiment analysis on the text that will be pronounced by the robot, we derive a value for emotion valence and coherently choose suitable parameters for the expressive elements. In this way, the robot has an adaptive expression generation during talking. Experiments validate the proposed approach by analyzing the contribution of all the factors to the perceived naturalness of the robot behavior.

Towards a framework for socially interactive robots.
Rodriguez, I.
2018.
@misc{rodriguez2018towards,
  title = {Towards a framework for socially interactive robots},
  year = {2018},
  websites = {https://addi.ehu.es/handle/10810/32284},
  author = {Rodriguez, Igor}
}

2017 (7)

NAO robot as rehabilitation assistant in a Kinect controlled system.
Rodríguez, I.; Aguado, A.; Parra, O.; Lazkano, E.; and Sierra, B.
In Biosystems and Biorobotics, volume 15, pages 419-423. Springer International Publishing, 2017.
@inbook{rodriguez2017nao,
  chapter = {NAO robot as rehabilitation assistant in a Kinect controlled system},
  title = {Biosystems and Biorobotics},
  year = {2017},
  pages = {419-423},
  volume = {15},
  websites = {https://link.springer.com/chapter/10.1007/978-3-319-46669-9_70},
  publisher = {Springer International Publishing},
  author = {Rodríguez, I. and Aguado, A. and Parra, O. and Lazkano, E. and Sierra, B.},
  doi = {10.1007/978-3-319-46669-9_70}
}
In this paper the NAO robot is presented as a home rehabilitation assistant; machine learning is used to classify the data provided by a Kinect RGB-D sensor in order to obtain a home exercise monitoring system that helps physicians monitor patients during rehabilitation at home.
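
One typical feature such a Kinect-based monitor can extract is the angle at a joint, computed from three tracked skeleton points. The joint choice and the coordinates below are assumptions for illustration.

```python
import numpy as np

def joint_angle(a, b, c):
    """Angle in degrees at joint b, given 3D positions a-b-c
    (e.g. shoulder-elbow-wrist from the skeleton tracker)."""
    v1 = np.asarray(a, dtype=float) - np.asarray(b, dtype=float)
    v2 = np.asarray(c, dtype=float) - np.asarray(b, dtype=float)
    cosang = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    return np.degrees(np.arccos(np.clip(cosang, -1.0, 1.0)))

# Shoulder, elbow and wrist positions of a bent arm (metres, made up)
print(joint_angle([0, 0, 0], [0, -0.3, 0], [0.25, -0.35, 0]))
```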

Adaptive Emotional Chatting Behavior to Increase the Sociability of Robots.
Rodriguez, I.; Martínez-Otzeta, J. M.; Lazkano, E.; and Ruiz, T.
In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), volume 10652 LNAI, pages 666-675, November 2017. Springer Verlag.
@inproceedings{rodriguez2017adaptive,
  title = {Adaptive Emotional Chatting Behavior to Increase the Sociability of Robots},
  year = {2017},
  keywords = {Body language,Emotion expression,Humanoid robot,Sentiment analysis},
  pages = {666-675},
  volume = {10652 LNAI},
  websites = {https://doi.org/10.1007/978-3-319-70022-9_66},
  month = {11},
  publisher = {Springer Verlag},
  author = {Rodriguez, Igor and Martínez-Otzeta, José María and Lazkano, Elena and Ruiz, Txelo},
  doi = {10.1007/978-3-319-70022-9_66},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}
}
Emotion expression is one of the characteristics that make us social beings. It is one of the main forms, along with oral and written language, that gives us a glimpse into the inner mental state of another individual. One of the aims of social robotics is effortless communication between humans and robots. To achieve this goal, robotic emotional expression is a key ability, as it offers a more natural way to interact in a human-robot environment. In this paper a system to express the emotional content of a spoken text is presented. Head and arm movements, along with eye LED lighting and voice intonation, are combined to make a humanoid robot express the sadness-happiness emotion continuum. The robot is able to express the emotional meaning of texts in English, Spanish and Basque.
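
A toy stand-in for the sentiment step, of the kind that could feed the expressive channels above: a lexicon average producing a valence score. The lexicon and the averaging rule are assumptions, not the analyzer used in the paper.

```python
LEXICON = {"happy": 0.9, "great": 0.7, "sad": -0.8, "terrible": -0.9}

def valence(text):
    """Average the valence of the known words; 0.0 when none are known."""
    scores = [LEXICON[w] for w in text.lower().split() if w in LEXICON]
    return sum(scores) / len(scores) if scores else 0.0

print(valence("What a great and happy day"))   # 0.8
print(valence("That was a terrible mistake"))  # -0.9
```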

Body Self-Awareness for Social Robots.
Rodriguez, I.; Astigarraga, A.; Ruiz, T.; and Lazkano, E.
Proceedings - 2017 International Conference on Control, Artificial Intelligence, Robotics and Optimization, ICCAIRO 2017, 2018-January: 69-73. July 2017.
@article{rodriguez2017body,
  title = {Body Self-Awareness for Social Robots},
  year = {2017},
  pages = {69-73},
  volume = {2018-January},
  month = {7},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  author = {Rodriguez, Igor and Astigarraga, Aitzol and Ruiz, Txelo and Lazkano, Elena},
  doi = {10.1109/ICCAIRO.2017.23},
  journal = {Proceedings - 2017 International Conference on Control, Artificial Intelligence, Robotics and Optimization, ICCAIRO 2017}
}
Just as humans are conscious of their bodies, social robots, on the way to being truly autonomous, should also be able to recognize their own configuration. Our research group is working on a project named BertsoBot, which aims to develop social minstrel robots for entertainment. The work presented here focuses on the automatic recognition of the robot's own body postures. Only proprioceptive information is used, and several supervised classifiers are compared in order to choose the one that best fulfills the task requirements. A ROS module that performs the online classification has been implemented to endow the robot with self-awareness capabilities. The developed implementation allows our NAO minstrel robot to make decisions based on its body posture and state instead of just relying on a blind finite state automaton. A demo is provided in a link to a video.

Emotional poetry generation.
Astigarraga, A.; Martínez-Otzeta, J. M.; Rodriguez, I.; Sierra, B.; and Lazkano, E.
In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), volume 10458 LNAI, pages 332-342, 2017. Springer Verlag.
@inproceedings{astigarraga2017emotional,
  title = {Emotional poetry generation},
  year = {2017},
  keywords = {Affective computing,Basque language,Poetry generation,Sentiment analysis},
  pages = {332-342},
  volume = {10458 LNAI},
  websites = {http://www.sc.ehu.es/ccwrobot},
  publisher = {Springer Verlag},
  author = {Astigarraga, Aitzol and Martínez-Otzeta, José María and Rodriguez, Igor and Sierra, Basilio and Lazkano, Elena},
  doi = {10.1007/978-3-319-66429-3_32},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}
}
In this article we describe a new system for the automatic creation of poetry in Basque that not only generates novel poems, but also creates them conveying a certain attitude or state of mind. A poem is a text structured according to predefined formal rules, whose parts are semantically related and with an intended message, aiming to elicit an emotional response. The proposed system receives as an input the topic of the poem and the affective state (positive, neutral or negative) and tries to give as output a novel poem that: (1) satisfies formal constraints of rhyme and metric, (2) shows coherent content related to the given topic, and (3) expresses them through the predetermined mood. Although the presented system creates poems in Basque, it is highly modular and easily extendable to new languages.

Markov Text Generator for Basque Poetry.
Astigarraga, A.; Martínez-Otzeta, J. M.; Rodriguez, I.; Sierra, B.; and Lazkano, E.
Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 10415 LNAI: 228-236. 2017.
@article{astigarraga2017markov,
  title = {Markov Text Generator for Basque Poetry},
  year = {2017},
  keywords = {Basque language,N-grams,Poetry generation},
  pages = {228-236},
  volume = {10415 LNAI},
  websites = {https://link.springer.com/chapter/10.1007/978-3-319-64206-2_26},
  publisher = {Springer, Cham},
  author = {Astigarraga, Aitzol and Martínez-Otzeta, José María and Rodriguez, Igor and Sierra, Basilio and Lazkano, Elena},
  doi = {10.1007/978-3-319-64206-2_26},
  journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}
}
Poetry generation is a challenging field in the area of natural language processing. A poem is a text structured according to predefined formal rules and whose parts are semantically related. In this work we present a novel automated system to generate poetry in the Basque language conditioned by non-local constraints. From a given corpus, two Markov chains representing forward and backward 2-grams are built. From these Markov chains and a semantic model, a system able to generate poems conforming to a given metric and following semantic cues has been designed. The user is prompted to input a theme for the poem and also a seed word to start the generating process. The system produces several poems in less than a minute, enough for using it in live events.
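
A minimal forward 2-gram generator with a syllable budget, sketching the mechanism described above; the crude vowel-group syllable counter and the uniform random policy are assumptions, and the backward model and semantic cues of the paper are omitted.

```python
import random
from collections import defaultdict

def build_bigrams(corpus_lines):
    chain = defaultdict(list)
    for line in corpus_lines:
        words = line.lower().split()
        for w1, w2 in zip(words, words[1:]):
            chain[w1].append(w2)
    return chain

def syllables(word):
    """Very rough syllable estimate: count groups of consecutive vowels."""
    count, prev = 0, False
    for ch in word:
        vowel = ch in "aeiou"
        count += vowel and not prev
        prev = vowel
    return max(count, 1)

def generate_line(chain, seed, max_syllables=8):
    line, total = [seed], syllables(seed)
    while chain.get(line[-1]):
        nxt = random.choice(chain[line[-1]])
        if total + syllables(nxt) > max_syllables:
            break
        line.append(nxt)
        total += syllables(nxt)
    return " ".join(line)

chain = build_bigrams(["the sea sings to the stones",
                       "the stones answer the sea"])
print(generate_line(chain, "the"))
```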

Supervised + Unsupervised Classification for Human Pose Estimation with RGB-D Images: A First Step Towards a Rehabilitation System.
Aguado, A.; Rodríguez, I.; Lazkano, E.; and Sierra, B.
Biosystems and Biorobotics, 15: 795-800. 2017.
@article{aguado2017supervised,
  title = {Supervised + Unsupervised Classification for Human Pose Estimation with RGB-D Images: A First Step Towards a Rehabilitation System},
  year = {2017},
  pages = {795-800},
  volume = {15},
  websites = {https://link.springer.com/chapter/10.1007/978-3-319-46669-9_130},
  publisher = {Springer, Cham},
  author = {Aguado, A. and Rodríguez, I. and Lazkano, E. and Sierra, B.},
  doi = {10.1007/978-3-319-46669-9_130},
  journal = {Biosystems and Biorobotics}
}
A system has been developed to detect postures and movements of people, using the skeleton information provided by the OpenNI library. A supervised learning approach has been used to generate static posture classifier models. In the case of movements, the focus has been on clustering techniques. Once generated, these models are included as part of the system software, which reacts to postures and gestures made by any user. The automatic detection of postures is interesting for many applications, such as medical applications or intelligent interaction based on computer vision.
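
The unsupervised side can be sketched as clustering short movement windows by simple statistics of the tracked joints. The window size, the features and the number of clusters are assumptions of the example.

```python
import numpy as np
from sklearn.cluster import KMeans

def window_features(skeleton_seq):
    """skeleton_seq: (frames, joints, 3) positions; returns the per-coordinate
    mean and standard deviation over the window."""
    flat = skeleton_seq.reshape(skeleton_seq.shape[0], -1)
    return np.concatenate([flat.mean(axis=0), flat.std(axis=0)])

windows = np.random.rand(20, 30, 15, 3)   # 20 stand-in windows of 30 frames
X = np.stack([window_features(w) for w in windows])
labels = KMeans(n_clusters=3, n_init=10).fit_predict(X)
print(labels)
```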

Iris matching by means of Machine Learning paradigms: A new approach to dissimilarity computation.
Aginako, N.; Echegaray, G.; Martínez-Otzeta, J. M.; Rodríguez, I.; Lazkano, E.; and Sierra, B.
Pattern Recognition Letters, 91: 60-64. May 2017.
@article{aginako2017iris,
  title = {Iris matching by means of Machine Learning paradigms: A new approach to dissimilarity computation},
  year = {2017},
  keywords = {Biometrics,Dissimilarity computation,Image Processing,Iris recognition,Machine Learning},
  pages = {60-64},
  volume = {91},
  month = {5},
  publisher = {Elsevier B.V.},
  author = {Aginako, Naiara and Echegaray, Goretti and Martínez-Otzeta, J. M. and Rodríguez, Igor and Lazkano, Elena and Sierra, Basilio},
  doi = {10.1016/j.patrec.2017.01.019},
  journal = {Pattern Recognition Letters}
}
\n
\n\n\n
\n This paper presents a novel approach for iris dissimilarity computation based on Computer Vision and Machine Learning. First, iris images are processed using well-known image processing algorithms. Pixels of the output image are considered the input of the previously trained classifiers, obtaining the a posteriori probability for each of the considered class values. The main novelty of the presented work lies in the computation of the dissimilarity value of two iris images as the distance between the aforementioned a posteriori probabilities. Experimental results, based on the testing dataset given by the MICHE II Challenge organizers, indicate the appropriateness of the deployed method for the iris recognition task. The best results show a precision score above 90% even for iris images of new individuals.\n
\n\n\n
\n\n\n
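A minimal sketch of the dissimilarity idea in the abstract above: obtain per-class a posteriori probabilities from a trained classifier for two processed iris images, and take the distance between the two probability vectors as the dissimilarity. The classifier choice, feature size, and random training data are assumptions for illustration only.

```python
# Sketch of dissimilarity as a distance between a-posteriori class
# distributions. Feature extraction and training data are stand-ins.
import numpy as np
from sklearn.naive_bayes import GaussianNB

rng = np.random.default_rng(1)
X_train = rng.normal(size=(200, 64))     # stand-in for processed iris pixels
y_train = rng.integers(0, 10, size=200)  # 10 hypothetical identities

clf = GaussianNB().fit(X_train, y_train)

def dissimilarity(feat_a, feat_b):
    """Euclidean distance between the two posterior probability vectors."""
    p_a = clf.predict_proba(feat_a.reshape(1, -1))[0]
    p_b = clf.predict_proba(feat_b.reshape(1, -1))[0]
    return np.linalg.norm(p_a - p_b)

print(dissimilarity(rng.normal(size=64), rng.normal(size=64)))
```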
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2016\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Singing minstrel robots, a means for improving social behaviors.\n \n \n \n\n\n \n Rodriguez, I.; Astigarraga, A.; Ruiz, T.; and Lazkano, E.\n\n\n \n\n\n\n In Proceedings - IEEE International Conference on Robotics and Automation, 2016. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Singing minstrel robots, a means for improving social behaviors},\n type = {inproceedings},\n year = {2016},\n id = {0406857a-9ca3-3571-92f6-6e4867413e4e},\n created = {2022-11-28T15:32:59.147Z},\n file_attached = {false},\n profile_id = {f67eca1d-c11e-3ca8-8fb7-763c9c95282d},\n group_id = {69b6a506-7e92-3ad9-b886-fd05207c084b},\n last_modified = {2022-11-28T15:32:59.147Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Bertsolaritza, Basque improvised contest poetry, offers another sphere to develop robot body language and robot communication capabilities, that shares some similarities with theatrical performances. It is also a new area to work on social robotics. The work presented in this paper makes some steps forward in designing and implementing the set of behaviors the robots need to show in the stage to increase, on the one hand robot autonomy and on the other hand, credibility and sociability.},\n bibtype = {inproceedings},\n author = {Rodriguez, Igor and Astigarraga, Aitzol and Ruiz, Txelo and Lazkano, Elena},\n doi = {10.1109/ICRA.2016.7487454},\n booktitle = {Proceedings - IEEE International Conference on Robotics and Automation}\n}
\n
\n\n\n
\n Bertsolaritza, Basque improvised contest poetry, offers another sphere in which to develop robot body language and robot communication capabilities, one that shares some similarities with theatrical performances. It is also a new area in which to work on social robotics. The work presented in this paper takes some steps forward in designing and implementing the set of behaviors the robots need to show on stage to increase, on the one hand, robot autonomy and, on the other hand, credibility and sociability.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Standardization of a Heterogeneous Robots Society Based on ROS.\n \n \n \n \n\n\n \n Rodriguez, I.; Jauregi, E.; Astigarraga, A.; Ruiz, T.; and Lazkano, E.\n\n\n \n\n\n\n Studies in Computational Intelligence, 625: 289-313. 2 2016.\n \n\n\n\n
\n\n\n\n \n \n \"StandardizationWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Standardization of a Heterogeneous Robots Society Based on ROS},\n type = {article},\n year = {2016},\n keywords = {Heterogeneous robots,Human-robot interaction,Navigation,Old robot renewal,Speech recognition,Standardization,Teleoperation},\n pages = {289-313},\n volume = {625},\n websites = {https://link.springer.com/chapter/10.1007/978-3-319-26054-9_11},\n month = {2},\n publisher = {Springer, Cham},\n day = {1},\n id = {f52f3711-5027-3cb6-8cee-0f17c18e0dc6},\n created = {2022-11-28T15:33:02.632Z},\n accessed = {2022-03-15},\n file_attached = {false},\n profile_id = {f67eca1d-c11e-3ca8-8fb7-763c9c95282d},\n group_id = {69b6a506-7e92-3ad9-b886-fd05207c084b},\n last_modified = {2022-11-28T15:33:02.632Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {false},\n hidden = {false},\n private_publication = {false},\n abstract = {In this use case chapter the use of ROS is presented to achieve the standardization of a heterogeneous robots society. So on, several specific packages have been developed. Some case studies have been analized usingROS to control particular robots different in nature and morphology in some applications of interest in robotics such as navigation and teleoperation, and results are presented. All the developed work runs for Indigo version of ROS and the open source code is available at RSAIT’s github (github.com/rsait). Some videos can be seen at our youtube: channel https://www.youtube.com/channel/UCT1s6oS21d8fxFeugxCrjnQ.},\n bibtype = {article},\n author = {Rodriguez, Igor and Jauregi, Ekaitz and Astigarraga, Aitzol and Ruiz, Txelo and Lazkano, Elena},\n doi = {10.1007/978-3-319-26054-9_11},\n journal = {Studies in Computational Intelligence}\n}
\n
\n\n\n
\n This use case chapter presents the use of ROS to achieve the standardization of a heterogeneous robot society. To that end, several specific packages have been developed. Some case studies have been analyzed using ROS to control particular robots, different in nature and morphology, in applications of interest in robotics such as navigation and teleoperation, and results are presented. All the developed work runs on the Indigo version of ROS and the open-source code is available at RSAIT’s GitHub (github.com/rsait). Some videos can be seen on our YouTube channel: https://www.youtube.com/channel/UCT1s6oS21d8fxFeugxCrjnQ.\n
\n\n\n
\n\n\n
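To illustrate the kind of standardization the chapter above describes, here is a minimal rospy node (Indigo-era API) publishing velocity commands on a shared /cmd_vel topic, so robots of different morphologies can be driven through one common interface. The topic name, rate, and speed are illustrative; this is not code from the RSAIT packages.

```python
#!/usr/bin/env python
# Hedged sketch: a minimal ROS node driving any /cmd_vel-compatible robot.
import rospy
from geometry_msgs.msg import Twist

def main():
    rospy.init_node('uniform_teleop')
    pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
    rate = rospy.Rate(10)   # publish at 10 Hz
    cmd = Twist()
    cmd.linear.x = 0.2      # move forward slowly
    while not rospy.is_shutdown():
        pub.publish(cmd)
        rate.sleep()

if __name__ == '__main__':
    main()
```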
\n \n\n \n \n \n \n \n Undirected cyclic graph based multiclass pair-wise classifier: Classifier number reduction maintaining accuracy.\n \n \n \n\n\n \n Mendialdua, I.; Echegaray, G.; Rodriguez, I.; Lazkano, E.; and Sierra, B.\n\n\n \n\n\n\n Neurocomputing, 171: 1576-1590. 1 2016.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Undirected cyclic graph based multiclass pair-wise classifier: Classifier number reduction maintaining accuracy},\n type = {article},\n year = {2016},\n keywords = {Decomposition strategies,Machine learning,One-vs-One,Supervised classification},\n pages = {1576-1590},\n volume = {171},\n month = {1},\n publisher = {Elsevier},\n day = {1},\n id = {688a816a-422d-350d-a655-f485c3c465dc},\n created = {2022-11-28T15:33:05.650Z},\n accessed = {2021-01-25},\n file_attached = {false},\n profile_id = {f67eca1d-c11e-3ca8-8fb7-763c9c95282d},\n group_id = {69b6a506-7e92-3ad9-b886-fd05207c084b},\n last_modified = {2022-11-28T15:33:05.650Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {false},\n hidden = {false},\n private_publication = {false},\n abstract = {Supervised classification approaches try to classify correctly the new unlabelled examples based on a set of well-labelled samples. Nevertheless, some classification methods were formulated for binary classification problems and has difficulties for multi-class problems. Binarization strategies decompose the original multi-class dataset into multiple two-class subsets. For each new sub-problem a classifier is constructed. One-vs-One is a popular decomposition strategy that in each sub-problem discriminates the cases that belong to a pair of classes, ignoring the remaining ones. One of its drawbacks is that it creates a large number of classifiers, and some of them are irrelevant. In order to reduce the number of classifiers, in this paper we propose a new method called Decision Undirected Cyclic Graph. Instead of making the comparisons of all the pair of classes, each class is compared only with other two classes; evolutionary computation is used in the proposed approach in order to obtain suitable class pairing. In order to empirically show the performance of the proposed approach, a set of experiments over four popular Machine Learning algorithms are carried out, where our new method is compared with other well-known decomposition strategies of the literature obtaining promising results.},\n bibtype = {article},\n author = {Mendialdua, I. and Echegaray, G. and Rodriguez, I. and Lazkano, E. and Sierra, B.},\n doi = {10.1016/j.neucom.2015.07.078},\n journal = {Neurocomputing}\n}
\n
\n\n\n
\n Supervised classification approaches try to correctly classify new unlabelled examples based on a set of well-labelled samples. Nevertheless, some classification methods were formulated for binary classification problems and have difficulties with multi-class problems. Binarization strategies decompose the original multi-class dataset into multiple two-class subsets, and for each new sub-problem a classifier is constructed. One-vs-One is a popular decomposition strategy in which each sub-problem discriminates the cases that belong to a pair of classes, ignoring the remaining ones. One of its drawbacks is that it creates a large number of classifiers, some of them irrelevant. In order to reduce the number of classifiers, in this paper we propose a new method called Decision Undirected Cyclic Graph. Instead of comparing all pairs of classes, each class is compared with only two other classes; evolutionary computation is used in the proposed approach in order to obtain a suitable class pairing. In order to empirically show the performance of the proposed approach, a set of experiments over four popular Machine Learning algorithms is carried out, in which our new method is compared with other well-known decomposition strategies from the literature, obtaining promising results.\n
\n\n\n
\n\n\n
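A sketch of the classifier-count reduction described above: pair each class only with its two neighbours on a ring, training k binary classifiers instead of the k(k-1)/2 of full One-vs-One, then vote. The ring order below is arbitrary; the paper selects the pairing with evolutionary computation, which is not reproduced here.

```python
# Cyclic One-vs-One sketch: 10 digit classes -> 10 pairwise models (vs. 45).
import numpy as np
from sklearn.datasets import load_digits
from sklearn.linear_model import LogisticRegression

X, y = load_digits(return_X_y=True)
classes = np.unique(y)
k = len(classes)

# ring pairing: (0,1), (1,2), ..., (k-1,0)
pairs = [(classes[i], classes[(i + 1) % k]) for i in range(k)]
models = {}
for a, b in pairs:
    mask = np.isin(y, [a, b])
    models[(a, b)] = LogisticRegression(max_iter=1000).fit(X[mask], y[mask])

def predict(x):
    votes = np.zeros(k, dtype=int)
    for m in models.values():
        votes[int(m.predict(x.reshape(1, -1))[0])] += 1
    return int(np.argmax(votes))  # majority vote over the k pairwise models

print(predict(X[0]), "(true:", y[0], ")")
```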
\n \n\n \n \n \n \n \n Minstrel robots: Body language expression through applause evaluation.\n \n \n \n\n\n \n Kraemer, F.; Rodriguez, I.; Parra, O.; Ruiz, T.; and Lazkano, E.\n\n\n \n\n\n\n IEEE-RAS International Conference on Humanoid Robots,332-337. 12 2016.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Minstrel robots: Body language expression through applause evaluation},\n type = {article},\n year = {2016},\n pages = {332-337},\n month = {12},\n publisher = {IEEE Computer Society},\n day = {30},\n id = {ebf3349a-4b45-310c-8da1-a7ff07cab980},\n created = {2022-11-28T15:33:06.581Z},\n accessed = {2022-03-15},\n file_attached = {false},\n profile_id = {f67eca1d-c11e-3ca8-8fb7-763c9c95282d},\n group_id = {69b6a506-7e92-3ad9-b886-fd05207c084b},\n last_modified = {2022-11-28T15:33:06.581Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {false},\n hidden = {false},\n private_publication = {false},\n abstract = {Currently humanoid robots become technically more capable of executing complex movements, showing human-like gestures, sometimes even facial expressions, and acting in general. While this lays the basis to make robot theater/enactments more and more interesting for the audience, another key-component is flexibility in the flow of an event to move on from simple pre-scripting. Here a sophisticated method is introduced relying on audio processing, clustering and machine learning techniques to evaluate audience's applauses, allowing the robot to infer self-evaluation about its actions. In a second step we use this information and a humanoid robot's body language to alter the flow of the event and display a reaction for the audience.},\n bibtype = {article},\n author = {Kraemer, F. and Rodriguez, I. and Parra, O. and Ruiz, T. and Lazkano, E.},\n doi = {10.1109/HUMANOIDS.2016.7803297},\n journal = {IEEE-RAS International Conference on Humanoid Robots}\n}
\n
\n\n\n
\n Humanoid robots are currently becoming technically more capable of executing complex movements, showing human-like gestures, sometimes even facial expressions, and acting in general. While this lays the basis for making robot theater/enactments more and more interesting for the audience, another key component is flexibility in the flow of an event, moving on from simple pre-scripting. Here a sophisticated method is introduced, relying on audio processing, clustering and machine learning techniques, to evaluate the audience's applause, allowing the robot to derive a self-evaluation of its actions. In a second step we use this information and a humanoid robot's body language to alter the flow of the event and display a reaction for the audience.\n
\n\n\n
\n\n\n
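A toy sketch of the applause-evaluation pipeline named above: summarize each audio clip with simple loudness features and cluster the clips, so cluster membership can be mapped to a reaction. The features, synthetic clips, and two-cluster choice are illustrative assumptions, not the paper's exact method.

```python
# Hedged sketch: RMS-energy features per clip, clustered into two groups.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(2)

def clip_features(signal, frame=1024):
    frames = signal[: len(signal) // frame * frame].reshape(-1, frame)
    rms = np.sqrt((frames ** 2).mean(axis=1))
    return [rms.mean(), rms.max(), rms.std()]  # loudness summary per clip

# synthetic stand-ins: 10 quiet clips, 10 loud clips
clips = [rng.normal(scale=s, size=16000) for s in (0.1,) * 10 + (0.8,) * 10]
X = np.array([clip_features(c) for c in clips])

labels = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(X)
print(labels)  # quieter vs. louder applause groups
```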
\n \n\n \n \n \n \n \n Machine Learning approach to dissimilarity computation: Iris matching.\n \n \n \n\n\n \n Aginako, N.; Martínez-Otzeta, J., M.; Rodriguez, I.; Lazkano, E.; and Sierra, B.\n\n\n \n\n\n\n In Proceedings - International Conference on Pattern Recognition, volume 0, pages 170-175, 1 2016. Institute of Electrical and Electronics Engineers Inc.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Machine Learning approach to dissimilarity computation: Iris matching},\n type = {inproceedings},\n year = {2016},\n pages = {170-175},\n volume = {0},\n month = {1},\n publisher = {Institute of Electrical and Electronics Engineers Inc.},\n day = {1},\n id = {b494f16b-bbc8-3850-891b-327f3532e1c0},\n created = {2022-11-28T15:33:08.692Z},\n accessed = {2021-01-25},\n file_attached = {false},\n profile_id = {f67eca1d-c11e-3ca8-8fb7-763c9c95282d},\n group_id = {69b6a506-7e92-3ad9-b886-fd05207c084b},\n last_modified = {2022-11-28T15:33:08.692Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {false},\n hidden = {false},\n private_publication = {false},\n abstract = {This paper presents a novel approach for iris dissimilarity computation based on Machine Learning paradigms and Computer Vision transformations. Based on the training dataset given by the MICHE II Challenge organizers, a set of classifiers has been constructed and tested, aiming at classifying a single image.},\n bibtype = {inproceedings},\n author = {Aginako, N. and Martínez-Otzeta, J. M. and Rodriguez, I. and Lazkano, E. and Sierra, B.},\n doi = {10.1109/ICPR.2016.7899628},\n booktitle = {Proceedings - International Conference on Pattern Recognition}\n}
\n
\n\n\n
\n This paper presents a novel approach for iris dissimilarity computation based on Machine Learning paradigms and Computer Vision transformations. Based on the training dataset given by the MICHE II Challenge organizers, a set of classifiers has been constructed and tested, aiming at classifying a single image.\n
\n\n\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2015\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Humanizing NAO robot teleoperation using ROS.\n \n \n \n\n\n \n Rodriguez, I.; Astigarraga, A.; Jauregi, E.; Ruiz, T.; and Lazkano, E.\n\n\n \n\n\n\n In IEEE-RAS International Conference on Humanoid Robots, 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Humanizing NAO robot teleoperation using ROS},\n type = {inproceedings},\n year = {2015},\n id = {2dc74b8f-cfcc-3101-8e81-94198955ce05},\n created = {2022-11-28T15:32:59.408Z},\n file_attached = {false},\n profile_id = {f67eca1d-c11e-3ca8-8fb7-763c9c95282d},\n group_id = {69b6a506-7e92-3ad9-b886-fd05207c084b},\n last_modified = {2022-11-28T15:32:59.408Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {The work presented here proposes two different ROS packages to enrich the teleoperation of the robot NAO: speech-based teleoperation (in Basque) and gesture-based teleoperation together with arm control. These packages have been used and evaluated in a human mimicking experiment. The tools offered can serve as a base for many applications.},\n bibtype = {inproceedings},\n author = {Rodriguez, Igor and Astigarraga, A. and Jauregi, E. and Ruiz, T. and Lazkano, E.},\n doi = {10.1109/HUMANOIDS.2014.7041357},\n booktitle = {IEEE-RAS International Conference on Humanoid Robots}\n}
\n
\n\n\n
\n The work presented here proposes two different ROS packages to enrich the teleoperation of the robot NAO: speech-based teleoperation (in Basque) and gesture-based teleoperation together with arm control. These packages have been used and evaluated in a human mimicking experiment. The tools offered can serve as a base for many applications.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Dynamic selection of the best base classifier in One versus One.\n \n \n \n\n\n \n Mendialdua, I.; Martínez-Otzeta, J., M.; Rodriguez-Rodriguez, I.; Ruiz-Vazquez, T.; and Sierra, B.\n\n\n \n\n\n\n Knowledge-Based Systems, 85: 298-306. 9 2015.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Dynamic selection of the best base classifier in One versus One},\n type = {article},\n year = {2015},\n keywords = {Classifier combination,Decomposition strategies,Dynamic classifier selection,Machine learning,One against One,Supervised classification},\n pages = {298-306},\n volume = {85},\n month = {9},\n publisher = {Elsevier},\n day = {1},\n id = {01b7a182-104c-3114-91bd-0dbffcf6f8c6},\n created = {2022-11-28T15:33:05.403Z},\n accessed = {2021-01-22},\n file_attached = {false},\n profile_id = {f67eca1d-c11e-3ca8-8fb7-763c9c95282d},\n group_id = {69b6a506-7e92-3ad9-b886-fd05207c084b},\n last_modified = {2022-11-28T15:33:05.403Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {false},\n hidden = {false},\n private_publication = {false},\n abstract = {Class binarization strategies decompose the original multi-class problem into several binary sub-problems. One versus One (OVO) is one of the most popular class binarization techniques, which considers every pair of classes as a different sub-problem. Usually, the same classifier is applied to every sub-problem and then all the outputs are combined by some voting scheme. In this paper we present a novel idea where for each test instance we try to assign the best classifier in each sub-problem of OVO. To do so, we have used two simple Dynamic Classifier Selection (DCS) strategies that have not been yet used in this context. The two DCS strategies use K-NN to obtain the local region of the test-instance, and the classifier that performs the best for those instances in the local region, is selected to classify the new test instance. The difference between the two DCS strategies remains in the weight of the instance. In this paper we have also proposed a novel approach in those DCS strategies. We propose to use the K-Nearest Neighbor Equality (K-NNE) method to obtain the local accuracy. K-NNE is an extension of K-NN in which all the classes are treated independently: the K nearest neighbors belonging to each class are selected. In this way all the classes take part in the final decision. We have carried out an empirical study over several UCI databases, which shows the robustness of our proposal.},\n bibtype = {article},\n author = {Mendialdua, I. and Martínez-Otzeta, J. M. and Rodriguez-Rodriguez, I. and Ruiz-Vazquez, T. and Sierra, B.},\n doi = {10.1016/j.knosys.2015.05.015},\n journal = {Knowledge-Based Systems}\n}
\n
\n\n\n
\n Class binarization strategies decompose the original multi-class problem into several binary sub-problems. One versus One (OVO) is one of the most popular class binarization techniques; it considers every pair of classes as a different sub-problem. Usually, the same classifier is applied to every sub-problem and all the outputs are then combined by some voting scheme. In this paper we present a novel idea: for each test instance we try to assign the best classifier in each sub-problem of OVO. To do so, we have used two simple Dynamic Classifier Selection (DCS) strategies that have not yet been used in this context. The two DCS strategies use K-NN to obtain the local region of the test instance, and the classifier that performs best for the instances in the local region is selected to classify the new test instance. The difference between the two DCS strategies lies in the weighting of the instances. In this paper we also propose a novel approach within those DCS strategies: to use the K-Nearest Neighbor Equality (K-NNE) method to obtain the local accuracy. K-NNE is an extension of K-NN in which all the classes are treated independently: the K nearest neighbors belonging to each class are selected. In this way all the classes take part in the final decision. We have carried out an empirical study over several UCI databases, which shows the robustness of our proposal.\n
\n\n\n
\n\n\n
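A compact sketch of the K-NNE-based dynamic selection described above: for a test instance, collect the K nearest validation neighbours from each class, score each base classifier on that local region, and let the locally best one predict. The base classifiers, K, and the validation split are illustrative choices, not the paper's configuration.

```python
# K-NNE-style dynamic classifier selection sketch on a stand-in dataset.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=0)

bases = [GaussianNB().fit(X_tr, y_tr),
         DecisionTreeClassifier(random_state=0).fit(X_tr, y_tr)]

def knne_region(x, K=3):
    """Indices of the K nearest validation points from *each* class."""
    idx = []
    for c in np.unique(y_val):
        cls = np.where(y_val == c)[0]
        d = np.linalg.norm(X_val[cls] - x, axis=1)
        idx.extend(cls[np.argsort(d)[:K]])
    return np.array(idx)

def predict(x):
    region = knne_region(x)
    # local accuracy of each base classifier on the K-NNE region
    acc = [(m.predict(X_val[region]) == y_val[region]).mean() for m in bases]
    best = bases[int(np.argmax(acc))]
    return int(best.predict(x.reshape(1, -1))[0])

print(predict(X_val[0]), "(true:", y_val[0], ")")
```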
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2013\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Towards Basque Oral Poetry Analysis: A Machine Learning Approach.\n \n \n \n \n\n\n \n Osinalde, M.; Astigarraga, A.; Rodriguez, I.; and Agirrezabal, M.\n\n\n \n\n\n\n Technical Report 2013.\n \n\n\n\n
\n\n\n\n \n \n \"TowardsWebsite\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{\n title = {Towards Basque Oral Poetry Analysis: A Machine Learning Approach},\n type = {techreport},\n year = {2013},\n pages = {9-11},\n websites = {http://www.cs.waikato.ac.nz/ml/weka/},\n id = {7996a809-59be-3c60-a87c-7994b1f1d7c7},\n created = {2022-11-28T15:33:05.011Z},\n accessed = {2021-01-25},\n file_attached = {false},\n profile_id = {f67eca1d-c11e-3ca8-8fb7-763c9c95282d},\n group_id = {69b6a506-7e92-3ad9-b886-fd05207c084b},\n last_modified = {2022-11-28T15:33:05.011Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {false},\n hidden = {false},\n private_publication = {false},\n abstract = {This work aims to study the narrative structure of Basque greeting verses from a text classification approach. We propose a set of thematic categories for the correct classification of verses, and then, use those categories to analyse the verses based on Machine Learning techniques. Classification methods such as Naive Bayes, k-NN, Support Vector Machines and Decision Tree Learner have been selected. Dimensionality reduction techniques have been applied in order to reduce the term space. The results shown by the experiments give an indication of the suitability of the proposed approach for the task at hands.},\n bibtype = {techreport},\n author = {Osinalde, Mikel and Astigarraga, Aitzol and Rodriguez, Igor and Agirrezabal, Manex}\n}
\n
\n\n\n
\n This work aims to study the narrative structure of Basque greeting verses from a text classification approach. We propose a set of thematic categories for the correct classification of verses and then use those categories to analyse the verses based on Machine Learning techniques. Classification methods such as Naive Bayes, k-NN, Support Vector Machines and Decision Tree learners have been selected. Dimensionality reduction techniques have been applied in order to reduce the term space. The results shown by the experiments give an indication of the suitability of the proposed approach for the task at hand.\n
\n\n\n
\n\n\n
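A minimal sketch of the verse-classification setup in the report above: TF-IDF features feeding a Naive Bayes classifier, two of the ingredients the abstract names. The toy verses and thematic labels below are invented placeholders, not the study's corpus or categories.

```python
# Bag-of-words/TF-IDF + Naive Bayes verse classification sketch.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline

verses = ["agur t'erdi entzule maiteak", "ongi etorri gure herrira",
          "azken agurra heldu da", "kaixo lagunak gau honetan"]
themes = ["farewell", "greeting", "farewell", "greeting"]  # hypothetical labels

model = make_pipeline(TfidfVectorizer(), MultinomialNB())
model.fit(verses, themes)
print(model.predict(["agur eta ohore denoi"]))
```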
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);