Publications (generated by bibbase.org)

2024 (1)

Feature Aware Re-weighting (FAR) in Bird's Eye View for LiDAR-based 3D object detection in autonomous driving applications.
Zamanakos, G., Tsochatzidis, L., Amanatiadis, A., & Pratikakis, I.
Robotics and Autonomous Systems, 175, 2024. doi: 10.1016/j.robot.2024.104664

@article{zamanakos2024far,
  title    = {Feature Aware Re-weighting (FAR) in Bird's Eye View for LiDAR-based 3D object detection in autonomous driving applications},
  author   = {Zamanakos, G. and Tsochatzidis, L. and Amanatiadis, A. and Pratikakis, I.},
  journal  = {Robotics and Autonomous Systems},
  volume   = {175},
  year     = {2024},
  doi      = {10.1016/j.robot.2024.104664},
  keywords = {3D object detection, Autonomous driving, Bird's Eye View, Deep learning, LiDAR, Point cloud}
}

3D object detection is a key element for the perception of autonomous vehicles. LiDAR sensors are commonly used to perceive the surrounding area, producing a sparse representation of the scene in the form of a point cloud. The current trend is to use deep learning neural network architectures that predict 3D bounding boxes. The vast majority of architectures process the LiDAR point cloud directly but, due to computation and memory constraints, at some point they compress the input to a 2D Bird's Eye View (BEV) representation. In this work, we propose a novel 2D neural network architecture, namely the Feature Aware Re-weighting Network, for feature extraction in BEV using local context via an attention mechanism, to improve the 3D detection performance of LiDAR-based detectors. Extensive experiments on five state-of-the-art detectors and three benchmarking datasets, namely KITTI, Waymo and nuScenes, demonstrate the effectiveness of the proposed method in terms of both detection performance and minimal added computational burden. We release our code at https://github.com/grgzam/FAR.
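
As a rough illustration of the core idea (learned re-weighting of a BEV feature map using local context), the PyTorch sketch below applies a small convolutional attention branch to a BEV pseudo-image. The module structure and layer sizes are illustrative assumptions, not the published FAR architecture; the authors' actual implementation is at https://github.com/grgzam/FAR.

import torch
import torch.nn as nn

class BEVReweight(nn.Module):
    """Re-weights BEV features with per-cell, per-channel attention weights
    computed from a small local-context branch (illustrative sketch only)."""
    def __init__(self, channels: int, hidden: int = 32):
        super().__init__()
        self.context = nn.Sequential(
            nn.Conv2d(channels, hidden, kernel_size=3, padding=1),  # 3x3 convs see a local BEV neighbourhood
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden, channels, kernel_size=3, padding=1),
            nn.Sigmoid(),  # weights in (0, 1)
        )

    def forward(self, bev: torch.Tensor) -> torch.Tensor:
        # bev: (batch, channels, H, W) pseudo-image from a pillar/voxel encoder
        return bev * self.context(bev)

# Usage: re-weight a 64-channel BEV map before the detection head.
bev = torch.randn(1, 64, 496, 432)
out = BEVReweight(64)(bev)
assert out.shape == bev.shape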

2023 (5)

SHREC 2023: Point cloud change detection for city scenes.
Gao, Y., Yuan, H., Ku, T., Veltkamp, R. C., Zamanakos, G., Tsochatzidis, L., Amanatiadis, A., Pratikakis, I., Panou, A., Romanelis, I., Fotis, V., Arvanitis, G., & Moustakas, K.
Computers and Graphics (Pergamon), 115, 2023. doi: 10.1016/j.cag.2023.06.025

@article{gao2023shrec,
  title    = {SHREC 2023: Point cloud change detection for city scenes},
  author   = {Gao, Y. and Yuan, H. and Ku, T. and Veltkamp, R. C. and Zamanakos, G. and Tsochatzidis, L. and Amanatiadis, A. and Pratikakis, I. and Panou, A. and Romanelis, I. and Fotis, V. and Arvanitis, G. and Moustakas, K.},
  journal  = {Computers and Graphics (Pergamon)},
  volume   = {115},
  year     = {2023},
  doi      = {10.1016/j.cag.2023.06.025},
  keywords = {Point cloud change detection, Retrieval models and ranking, SHREC}
}

Localization and navigation are the two most important tasks for mobile robots, and both require an up-to-date and accurate map. However, detecting map changes from crowdsourced data is a challenging task, especially across the billions of points collected by 3D acquisition devices. Collecting 3D data often requires expensive acquisition equipment, and there are limited data sources for evaluating point cloud change detection. To address these issues, in this Shape Retrieval Challenge (SHREC) track we provide a city-scene dataset with real and synthesized data for detecting 3D point cloud changes. The dataset consists of 866 pairs of object changes from 78 city-scene 3D point clouds collected by LiDAR and 845 pairs of object changes from 100 city-scene 3D point clouds generated by a high-fidelity simulator. We compare three methods on this benchmark. Evaluation results show that data-driven methods are the current trend in 3D point cloud change detection. In addition, the Siamese network architecture is helpful for detecting changes in our dataset. We hope this benchmark and the comparative evaluation results will further enrich and boost the research of point cloud change detection and its applications.
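
The evaluation found the Siamese architecture helpful for this task. Purely as an illustration of that pattern (no submitted method from the track is reproduced here), the sketch below embeds the "before" and "after" clouds of a scene pair with a shared encoder and scores change by embedding distance; the tiny PointNet-style encoder and the use of raw distance as the score are assumptions.

import torch
import torch.nn as nn

class PointEncoder(nn.Module):
    """Tiny PointNet-style encoder: per-point MLP followed by max-pooling."""
    def __init__(self, out_dim: int = 128):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(3, 64), nn.ReLU(),
            nn.Linear(64, out_dim),
        )

    def forward(self, pts: torch.Tensor) -> torch.Tensor:
        # pts: (batch, num_points, 3) -> (batch, out_dim) global feature
        return self.mlp(pts).max(dim=1).values

def change_score(encoder: PointEncoder, before: torch.Tensor, after: torch.Tensor) -> torch.Tensor:
    # The same shared-weight encoder on both clouds is the Siamese part.
    return torch.norm(encoder(before) - encoder(after), dim=1)

encoder = PointEncoder()
before = torch.randn(1, 2048, 3)  # e.g. a LiDAR crop around one object
after = torch.randn(1, 2048, 3)
print(change_score(encoder, before, after))  # large distance suggests a change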

Engaging Learners in Educational Robotics: Uncovering Students' Expectations for an Ideal Robotic Platform.
Kyprianou, G., Karousou, A., Makris, N., Sarafis, I., Amanatiadis, A., & Chatzichristofis, S. A.
Electronics (Switzerland), 12(13), 2023. doi: 10.3390/electronics12132865

@article{kyprianou2023engaging,
  title    = {Engaging Learners in Educational Robotics: Uncovering Students' Expectations for an Ideal Robotic Platform},
  author   = {Kyprianou, G. and Karousou, A. and Makris, N. and Sarafis, I. and Amanatiadis, A. and Chatzichristofis, S. A.},
  journal  = {Electronics (Switzerland)},
  volume   = {12},
  number   = {13},
  year     = {2023},
  doi      = {10.3390/electronics12132865},
  keywords = {STEM robotics, educational robotics, robot morphology, students' expectations}
}

Extensive research has been conducted on educational robotics (ER) platforms to explore their usage across different educational levels and assess their effectiveness in achieving desired learning outcomes. However, the existing literature has a limitation in regard to addressing learners' specific preferences and characteristics regarding these platforms. To address this gap, it is crucial to encourage learners' active participation in the design process of robotic platforms. By incorporating their valuable feedback and preferences and providing them with platforms that align with their interests, we can create a motivating environment that leads to increased engagement in science, technology, engineering and mathematics (STEM) courses and improved learning outcomes. Furthermore, this approach fosters a sense of absorption and full engagement among peers as they collaborate on assigned activities. To bridge the existing research gap, our study aimed to investigate the current trends in the morphology of educational robotics platforms. We surveyed students from multiple schools in Greece who had no prior exposure to robotic platforms. Our study aimed to understand students' expectations of an ideal robotic companion. We examined the desired characteristics, modes of interaction, and socialization that students anticipate from such a companion. By uncovering these attributes and standards, we aimed to inform the development of an optimal model that effectively fulfills students' educational aspirations while keeping them motivated and engaged.

A Survey on Map-Based Localization Techniques for Autonomous Vehicles.
Chalvatzaras, A., Pratikakis, I., & Amanatiadis, A.
IEEE Transactions on Intelligent Vehicles, 8(2), 2023. doi: 10.1109/TIV.2022.3192102

@article{chalvatzaras2023survey,
  title    = {A Survey on Map-Based Localization Techniques for Autonomous Vehicles},
  author   = {Chalvatzaras, A. and Pratikakis, I. and Amanatiadis, A. A.},
  journal  = {IEEE Transactions on Intelligent Vehicles},
  volume   = {8},
  number   = {2},
  year     = {2023},
  doi      = {10.1109/TIV.2022.3192102},
  keywords = {Autonomous vehicles, environment perception, high-definition maps, vehicle localization}
}

Autonomous vehicles integrate complex software stacks for realizing the necessary iterative perception, planning, and action operations. One of the foundational layers of such stacks is the perception layer, which comprises localization, detection, and recognition algorithms for understanding the location of the vehicle and the driving environment around it. More precisely, localization aims to identify the location of the vehicle in a global coordinate system and is considered one of the most critical parts of the stack, since its accuracy and robustness affect the subsequent algorithms of the perception layer as well as the following planning and action layers. Due to the rapid and significant interest in self-driving cars, several localization techniques have been proposed with different directions and approaches. Algorithms using prior maps are currently considered the most accurate and are found in almost all current self-driving car prototypes. Thus, in this paper, we categorize, discuss, and analyze state-of-the-art map-based localization techniques in an attempt to examine their potential and limitations. We first present techniques and approaches that aim to match prior maps with on-board observations from different sensor modalities. We then review methods that handle localization as a probabilistic problem and, finally, we go through the emerging domain of deep-learning localization algorithms and examine their potential in self-driving cars. For all three categories, we provide comparison tables and necessary insights for optimal localization system design based on different requirements, specifications, and sensor configurations.

Gamification Techniques and Feedback Mechanisms for Educational Robots.
Karousou, A., Makris, N., Sarafis, I., Chatzichristofis, S., & Amanatiadis, A.
In IEEE International Conference on Consumer Electronics - Berlin (ICCE-Berlin), 2023. doi: 10.1109/ICCE-Berlin58801.2023.10375672

@inproceedings{karousou2023gamification,
  title     = {Gamification Techniques and Feedback Mechanisms for Educational Robots},
  author    = {Karousou, A. and Makris, N. and Sarafis, I. and Chatzichristofis, S. and Amanatiadis, A.},
  booktitle = {IEEE International Conference on Consumer Electronics - Berlin (ICCE-Berlin)},
  year      = {2023},
  doi       = {10.1109/ICCE-Berlin58801.2023.10375672},
  keywords  = {Educational activities, Educational robots, Human-Computer interaction, Problem-Solving, Social Cognition}
}

Educational robots serve a critical role in enhancing children's education, particularly in promoting interactive learning. With their ability to adapt to individual needs, robots can offer dynamic teaching methods that improve learning outcomes. They also offer uninterrupted learning assistance beyond school hours, enabling consistent learning, while their non-judgmental nature creates a safe environment where children can learn from their mistakes without fear of embarrassment. This aspect, combined with the novelty and fun associated with robots, enhances motivation and engagement in learning. In this paper, we present a new educational robot, LINA, which is designed to captivate children's attention and foster active engagement by creating an enjoyable and immersive learning experience. Using gamification techniques and feedback mechanisms, LINA motivates children to participate actively, promoting their sustained interest and enthusiasm for learning. In the initial version of LINA, we implemented several activities designed to go beyond traditional school subjects, targeting children's cognitive development and learning in various domains more directly: a range of activities aimed at enhancing language skills, mathematical and visuospatial problem solving, and important social skills such as Theory of Mind.

A study on high definition maps' standards and specifications for autonomous vehicles.
Siopi, M., Ellinoudis, D., Pratikakis, I., & Amanatiadis, A.
In 2023 IEEE International Conference on Omni-Layer Intelligent Systems (COINS), 2023. doi: 10.1109/COINS57856.2023.10189236

@inproceedings{siopi2023study,
  title     = {A study on high definition maps' standards and specifications for autonomous vehicles},
  author    = {Siopi, M. and Ellinoudis, D. and Pratikakis, I. and Amanatiadis, A.},
  booktitle = {2023 IEEE International Conference on Omni-Layer Intelligent Systems (COINS)},
  year      = {2023},
  doi       = {10.1109/COINS57856.2023.10189236},
  keywords  = {Autonomous vehicles, High definition maps, Localization, Map conversion, Standardization}
}

Prior high definition maps are currently required in the perception stack of autonomous vehicles, since they encode road-level features with centimetre accuracy. This rapid map establishment has led to a vast range of different map vendors, formats, structures and standards. In this study, the landscape of high-definition maps is analyzed, evaluated and compared. Unlike similar works, the maps in this study are categorized by their formats and structures, since map vendors have started to provide single-structure maps in multiple formats. Although this wide availability helps to expand map usage, many aspects have to be considered and evaluated when choosing the most appropriate one. Beyond the detailed review, key issues and a discussion concerning the use of prior maps are provided to facilitate better map usage and adoption in the intelligent vehicles era.

2022 (3)

A cooperative LiDAR-camera scheme for extrinsic calibration.
Zamanakos, G., Tsochatzidis, L., Amanatiadis, A., & Pratikakis, I.
In IVMSP 2022 - IEEE 14th Image, Video, and Multidimensional Signal Processing Workshop, 2022. doi: 10.1109/IVMSP54334.2022.9816356

@inproceedings{zamanakos2022cooperative,
  title     = {A cooperative LiDAR-camera scheme for extrinsic calibration},
  author    = {Zamanakos, G. and Tsochatzidis, L. and Amanatiadis, A. and Pratikakis, I.},
  booktitle = {IVMSP 2022 - 2022 IEEE 14th Image, Video, and Multidimensional Signal Processing Workshop},
  year      = {2022},
  doi       = {10.1109/IVMSP54334.2022.9816356}
}

In this paper, a method for LiDAR-camera extrinsic calibration from multiple static scenes is proposed, using a simple calibration-target design with an ArUco marker. To this end, a novel LiDAR-camera cooperative scheme is employed: first, the camera-based detection of the marker guides the processing of the LiDAR point cloud to detect the 3D marker in it; once the marker has been accurately localized in the LiDAR point cloud, a further correction of the marker's pose estimate takes place. In this way, the advantages of each sensor are used to improve marker localization. The improved accuracy achieved in the computation of the extrinsic calibration parameters has been shown experimentally in both quantitative and qualitative terms.
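
Once the marker is localized in both sensor frames, the extrinsics reduce to a rigid alignment of corresponding 3D points. The sketch below shows only that generic final step (Kabsch/SVD alignment), not the paper's cooperative detection scheme; the sample corner coordinates are made up.

import numpy as np

def kabsch(src: np.ndarray, dst: np.ndarray):
    """src, dst: (N, 3) corresponding points. Returns R (3x3), t (3,) with dst ~ R @ src + t."""
    src_c, dst_c = src.mean(axis=0), dst.mean(axis=0)
    H = (src - src_c).T @ (dst - dst_c)      # cross-covariance of centred points
    U, _, Vt = np.linalg.svd(H)
    d = np.sign(np.linalg.det(Vt.T @ U.T))   # guard against a reflection solution
    R = Vt.T @ np.diag([1.0, 1.0, d]) @ U.T
    t = dst_c - R @ src_c
    return R, t

# Usage with made-up marker corners seen by both sensors (N >= 3 non-collinear
# points; in practice correspondences are accumulated over several static scenes).
lidar_pts = np.array([[4.0, 0.2, 0.1], [4.0, 0.8, 0.1],
                      [4.0, 0.8, 0.7], [4.0, 0.2, 0.7]])
R_true, t_true = np.eye(3), np.array([0.1, -0.05, 0.02])
cam_pts = lidar_pts @ R_true.T + t_true
R, t = kabsch(lidar_pts, cam_pts)
print(np.allclose(R, R_true), np.allclose(t, t_true))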

How an Educational Robot Should Look Like: The Students' Perspective.
Kyprianou, G., Karousou, A., Makris, N., Sarafis, I., Chatzichristofis, S. A., & Amanatiadis, A.
In Digest of Technical Papers - IEEE International Conference on Consumer Electronics, volume 2022-January, 2022. doi: 10.1109/ICCE53296.2022.9730596

@inproceedings{kyprianou2022educational,
  title     = {How an Educational Robot Should Look Like: The Students' Perspective},
  author    = {Kyprianou, G. and Karousou, A. and Makris, N. and Sarafis, I. and Chatzichristofis, S. A. and Amanatiadis, A.},
  booktitle = {Digest of Technical Papers - IEEE International Conference on Consumer Electronics},
  volume    = {2022-January},
  year      = {2022},
  doi       = {10.1109/ICCE53296.2022.9730596},
  keywords  = {Educational Robotics, Robot Interaction, Robot Morphology, Robot socialization, STEAM}
}

In the last decade, there has been a growing demand for educational robots in consumer electronics. This can be observed in the strong backing educational robots receive on crowd-funding platforms, where more than 2000 projects have been funded successfully. Lego, Robolink, Hanson Robotics, Modular Robotics, and Primo Toys are only a few of the companies disrupting education. Using robotic platforms like these, students now learn through experience and structure their knowledge under the Constructivist approaches that modern education promotes. The teacher is no longer the leading actor but rather a guide on the students' learning path, and the robotic platform has become a vital aspect of the students' development. Thus, considerable research is being undertaken on the construction, characteristics, and morphology each company gives its product. These choices should not be made at random; on the contrary, they should be examined against the requirements of those who will use the robots, the students. The main objective of this paper is to investigate the characteristics a robot should have and the modes of interaction and socialization with it that students seek and expect. This can provide the attributes and standards for constructing a model that lets students acquire what they are pursuing in a more effective and exciting manner without losing their motivation.

Educational Robotics in the Service of the Gestalt Similarity Principle.
Sophokleous, A., Amanatiadis, A., Gkelios, S., & Chatzichristofis, S. A.
In Digest of Technical Papers - IEEE International Conference on Consumer Electronics, volume 2022-January, 2022. doi: 10.1109/ICCE53296.2022.9730479

@inproceedings{sophokleous2022gestalt,
  title     = {Educational Robotics in the Service of the Gestalt Similarity Principle},
  author    = {Sophokleous, A. and Amanatiadis, A. and Gkelios, S. and Chatzichristofis, S. A.},
  booktitle = {Digest of Technical Papers - IEEE International Conference on Consumer Electronics},
  volume    = {2022-January},
  year      = {2022},
  doi       = {10.1109/ICCE53296.2022.9730479},
  keywords  = {Computer Vision, Educational Robotics, Gestalt Similarity Principle, Image Retrieval}
}

Today, many technological approaches, including educational robotics (ER), enrich the teaching process through gamification. Several studies have shown that educational activities with robots lead to increased student interest, a positive and more effective learning process, and several different skills. This paper adopts a content-based image retrieval mechanism to automate the Gestalt similarity testing process and evaluates the impact of the involvement of a humanoid robot. The proposed framework aims to improve participants' visual perception, cultivate their creativity, and improve their visual working memory. During a pilot study, the participants communicate with the proposed framework either by using a tablet or by interacting with a humanoid NAO robot. The experimental results showed that the participation of NAO significantly increased the interest, attention, and commitment of the students.
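
At its core, a content-based image retrieval mechanism describes each image with a feature vector and ranks candidates by similarity. The sketch below illustrates that generic pattern with a colour-histogram descriptor and cosine similarity; the descriptor choice is an assumption for illustration, not the feature used in the paper.

import numpy as np

def describe(image: np.ndarray, bins: int = 8) -> np.ndarray:
    """L1-normalised joint RGB histogram of an (H, W, 3) uint8 image."""
    hist, _ = np.histogramdd(image.reshape(-1, 3),
                             bins=(bins, bins, bins), range=[(0, 256)] * 3)
    v = hist.ravel()
    return v / v.sum()

def rank(query: np.ndarray, gallery: list) -> list:
    """Return gallery indices sorted from most to least similar to the query."""
    q = describe(query)
    descs = [describe(g) for g in gallery]
    sims = [float(q @ d) / (np.linalg.norm(q) * np.linalg.norm(d) + 1e-12)
            for d in descs]
    return sorted(range(len(gallery)), key=lambda i: -sims[i])

# Usage: the most similar gallery image comes first in the ranking.
rng = np.random.default_rng(0)
imgs = [rng.integers(0, 256, (32, 32, 3), dtype=np.uint8) for _ in range(5)]
print(rank(imgs[0], imgs))  # index 0 is ranked first (identical image)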

2021 (6)

Word spotting as a service: an unsupervised and segmentation-free framework for handwritten documents.
Zagoris, K., Amanatiadis, A., & Pratikakis, I.
Journal of Imaging, 7(12), 2021. doi: 10.3390/jimaging7120278

@article{zagoris2021word,
  title    = {Word spotting as a service: an unsupervised and segmentation-free framework for handwritten documents},
  author   = {Zagoris, K. and Amanatiadis, A. and Pratikakis, I.},
  journal  = {Journal of Imaging},
  volume   = {7},
  number   = {12},
  year     = {2021},
  doi      = {10.3390/jimaging7120278},
  keywords = {Cloud service, Document-oriented features, Handwritten documents, Indexing, Word spotting}
}

Word spotting strategies employed in historical handwritten documents face many challenges due to variation in the writing style and intense degradation. In this paper, a new method for efficient and effective word spotting in handwritten documents is presented. It relies upon document-oriented local features that take into account information around representative keypoints, and a matching process that incorporates spatial context in a local proximity search, without using any training data. The method is built on document-oriented keypoint and feature extraction, along with a fast feature matching method. This enables the corresponding methodological pipeline to be employed both effectively and efficiently in the cloud, so that word spotting can be realised as a service on modern mobile devices. The effectiveness and efficiency of the proposed method, in terms of matching accuracy and fast retrieval time respectively, are shown by a consistent evaluation on several historical handwritten datasets.
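
The pipeline follows the classic detect-describe-match pattern. As a generic stand-in (OpenCV ORB keypoints with Hamming matching, not the paper's document-oriented features), the sketch below matches a query word image against a page image.

import cv2

def match_word_image(query, page, keep: int = 30):
    """Detect and describe keypoints in a query word image and a page image,
    then return the strongest Hamming-distance matches between them."""
    orb = cv2.ORB_create(nfeatures=1000)
    kq, dq = orb.detectAndCompute(query, None)
    kp, dp = orb.detectAndCompute(page, None)
    if dq is None or dp is None:
        return []
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(matcher.match(dq, dp), key=lambda m: m.distance)
    # Matched page keypoints cluster around instances of the query word; a
    # spatial-proximity vote over kp[m.trainIdx].pt would localise them.
    return matches[:keep]

# Placeholder file names; substitute real binarised document images.
query = cv2.imread("query_word.png", cv2.IMREAD_GRAYSCALE)
page = cv2.imread("page.png", cv2.IMREAD_GRAYSCALE)
assert query is not None and page is not None, "replace with real image paths"
print(len(match_word_image(query, page)))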

MarsExplorer: Exploration of unknown terrains via deep reinforcement learning and procedurally generated environments.
Koutras, D. I., Kapoutsis, A. C., Amanatiadis, A. A., & Kosmatopoulos, E. B.
Electronics (Switzerland), 10(22), 2021. doi: 10.3390/electronics10222751

@article{koutras2021marsexplorer,
  title    = {MarsExplorer: Exploration of unknown terrains via deep reinforcement learning and procedurally generated environments},
  author   = {Koutras, D. I. and Kapoutsis, A. C. and Amanatiadis, A. A. and Kosmatopoulos, E. B.},
  journal  = {Electronics (Switzerland)},
  volume   = {10},
  number   = {22},
  year     = {2021},
  doi      = {10.3390/electronics10222751},
  keywords = {Deep Reinforcement Learning, Exploration, OpenAI gym, Unknown terrains}
}

This paper is an initial endeavor to bridge the gap between powerful Deep Reinforcement Learning methodologies and the problem of exploration/coverage of unknown terrains. Within this scope, MarsExplorer, an openai-gym compatible environment tailored to exploration/coverage of unknown areas, is presented. MarsExplorer translates the original robotics problem into a Reinforcement Learning setup that various off-the-shelf algorithms can tackle. Any learned policy can be applied straightforwardly to a robotic platform, without an elaborate simulation model of the robot's dynamics or a separate learning/adaptation phase. One of its core features is the controllable multi-dimensional procedural generation of terrains, which is the key to producing policies with strong generalization capabilities. Four state-of-the-art RL algorithms (A3C, PPO, Rainbow, and SAC) are trained on the MarsExplorer environment, and their results are evaluated against average human-level performance. In the follow-up experimental analysis, the effect of the multi-dimensional difficulty setting on the learning capabilities of the best-performing algorithm (PPO) is analyzed. A milestone result is the generation of an exploration policy that follows the Hilbert curve without providing this information to the environment or directly or indirectly rewarding Hilbert-curve-like trajectories. The experimental analysis concludes by evaluating the PPO-learned policy side-by-side with frontier-based exploration strategies. A study of the performance curves revealed that the PPO-based policy was capable of performing adaptive-to-the-unknown-terrain sweeping without leaving expensive-to-revisit areas uncovered, underlining the capability of RL-based methodologies to tackle exploration tasks efficiently.
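
Being openai-gym compatible means any off-the-shelf agent drives the environment through the standard reset/step loop, sketched below with the pre-0.26 gym API (the version era of the paper). The environment id is left as a placeholder; the real id is the one registered by the MarsExplorer package.

import gym

ENV_ID = "..."  # placeholder: use the id registered by the MarsExplorer package

def run_episode(env_id: str = ENV_ID, max_steps: int = 500) -> float:
    env = gym.make(env_id)
    obs = env.reset()             # old gym API: reset() returns the observation only
    total_reward = 0.0
    for _ in range(max_steps):
        action = env.action_space.sample()  # random policy; swap in PPO, SAC, etc.
        obs, reward, done, info = env.step(action)
        total_reward += reward
        if done:                  # terrain covered or episode limit reached
            break
    env.close()
    return total_reward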

Real-time semantic image segmentation with deep learning for autonomous driving: A survey.
Papadeas, I., Tsochatzidis, L., Amanatiadis, A., & Pratikakis, I.
Applied Sciences (Switzerland), 11(19), 2021. doi: 10.3390/app11198802

@article{papadeas2021realtime,
  title    = {Real-time semantic image segmentation with deep learning for autonomous driving: A survey},
  author   = {Papadeas, I. and Tsochatzidis, L. and Amanatiadis, A. and Pratikakis, I.},
  journal  = {Applied Sciences (Switzerland)},
  volume   = {11},
  number   = {19},
  year     = {2021},
  doi      = {10.3390/app11198802},
  keywords = {Autonomous driving, Deep learning, Real time, Semantic image segmentation}
}

Semantic image segmentation for autonomous driving is a challenging task due to its requirement for both effectiveness and efficiency. Recent developments in deep learning have demonstrated important performance gains in terms of accuracy. In this paper, we present a comprehensive overview of state-of-the-art semantic image segmentation methods using deep-learning techniques that aim to operate in real time, so that they can efficiently support an autonomous driving scenario. To this end, the presented overview puts particular emphasis on the approaches that permit inference-time reduction, while the existing methods are analysed with respect to their end-to-end functionality, along with a comparative study that relies upon a consistent evaluation framework. Finally, a fruitful discussion provides key insights on current trends and future research directions in real-time semantic image segmentation with deep learning for autonomous driving.

A comprehensive survey of LIDAR-based 3D object detection methods with deep learning for autonomous driving.
Zamanakos, G., Tsochatzidis, L., Amanatiadis, A., & Pratikakis, I.
Computers and Graphics (Pergamon), 99, 2021. doi: 10.1016/j.cag.2021.07.003

@article{zamanakos2021survey,
  title    = {A comprehensive survey of LIDAR-based 3D object detection methods with deep learning for autonomous driving},
  author   = {Zamanakos, G. and Tsochatzidis, L. and Amanatiadis, A. and Pratikakis, I.},
  journal  = {Computers and Graphics (Pergamon)},
  volume   = {99},
  year     = {2021},
  doi      = {10.1016/j.cag.2021.07.003},
  keywords = {3D Object detection, Autonomous driving, Deep learning}
}

LiDAR-based 3D object detection for autonomous driving has recently drawn the attention of both academia and industry, since it relies upon a sensor that incorporates appealing features like insensitivity to light and the capacity to capture the 3D spatial structure of an object, along with a continuously falling purchase cost. Furthermore, the emergence of Deep Learning as the means to boost performance in 3D data analysis has stimulated a multitude of solutions for LiDAR-based 3D object detection, which follow different approaches in an effort to respond effectively to several challenges. In view of this, this paper presents a comprehensive survey of LiDAR-based 3D object detection methods, wherein existing methods are analysed through a new categorisation that relies upon a common operational pipeline describing the end-to-end functionality of each method. Next, we discuss the existing benchmarking frameworks and present the performance achieved by each method in each of them. Finally, a discussion is presented that provides key insights aiming to capture the essence of current trends in LiDAR-based 3D object detection.

Introducing Algorithmic Thinking and Sequencing Using Tangible Robots.
Evripidou, S., Amanatiadis, A., Christodoulou, K., & Chatzichristofis, S. A.
IEEE Transactions on Learning Technologies, 14(1), 2021. doi: 10.1109/TLT.2021.3058060

@article{evripidou2021introducing,
  title    = {Introducing Algorithmic Thinking and Sequencing Using Tangible Robots},
  author   = {Evripidou, S. and Amanatiadis, A. and Christodoulou, K. and Chatzichristofis, S. A.},
  journal  = {IEEE Transactions on Learning Technologies},
  volume   = {14},
  number   = {1},
  year     = {2021},
  doi      = {10.1109/TLT.2021.3058060},
  keywords = {Algorithmic thinking, educational robots, sequencing, tangible programming}
}

Today, in the era of robotics, different types of educational robots have been used extensively in school classrooms to facilitate teaching activities related to a variety of computer science concepts. Numerous studies have been performed that attempt to examine the effects of using tangible interfaces to enhance collaborative learning experiences. In most of these studies, feedback, which is a vital function for a successful game activity, is mainly provided by the trainers. However, this kind of feedback can be considered as static and general, while each trainee seeks clear, consistent, and even personalized feedback. This article proposes an interactive learning tool for introducing algorithmic thinking and sequencing using educational robots suitable for elementary and intermediate students. In more detail, in this article, we leverage a fuzzy-rule-based system and computer vision techniques to provide immediate, personalized feedback and recommendations to young students while they perform a series of activities using tangible robots. These activities relate to teaching programming skills and improve the algorithmic thinking of students. Experimental results revealed that participants were able to increase their algorithmic/programming thinking skills while developing a positive attitude toward programming. The interactive gaming factor that is embedded in the use of tangible robots, while participating in the activities, was proved to be a compelling and a rewarding experience. The article concludes that the use of the proposed feedback mechanism, when placed in a robot game environment, can lead to a positive and more effective learning process.
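
As a minimal illustration of a fuzzy-rule-based feedback step, the sketch below fuzzifies two observed quantities and fires a single rule; the membership functions, thresholds, inputs, and messages are illustrative assumptions, not the paper's rule base.

def tri(x: float, a: float, b: float, c: float) -> float:
    """Triangular membership function peaking at b."""
    if x <= a or x >= c:
        return 0.0
    return (x - a) / (b - a) if x <= b else (c - x) / (c - b)

def feedback(solve_time_s: float, errors: int) -> str:
    fast = tri(solve_time_s, -1, 0, 60)   # fully 'fast' near 0 s, fading out by 60 s
    accurate = tri(errors, -1, 0, 3)      # fully 'accurate' at 0 errors
    # Rule: IF fast AND accurate THEN praise (AND realised as min, a common t-norm).
    praise = min(fast, accurate)
    if praise > 0.7:
        return "Excellent! Try the next, harder sequence."
    if praise > 0.3:
        return "Good job! Check the last two steps once more."
    return "Let's retry together, one command at a time."

print(feedback(solve_time_s=25, errors=1))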

Word Spotting as a Service for Handwritten Documents.
Amanatiadis, A., Zagoris, K., & Pratikakis, I.
In Digest of Technical Papers - IEEE International Conference on Consumer Electronics, volume 2021-January, 2021. doi: 10.1109/ICCE50685.2021.9427583

@inproceedings{amanatiadis2021word,
  title     = {Word Spotting as a Service for Handwritten Documents},
  author    = {Amanatiadis, A. and Zagoris, K. and Pratikakis, I.},
  booktitle = {Digest of Technical Papers - IEEE International Conference on Consumer Electronics},
  volume    = {2021-January},
  year      = {2021},
  doi       = {10.1109/ICCE50685.2021.9427583}
}
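Although the paper's document-oriented keypoints are not reproduced here, the segmentation-free query-by-example idea can be sketched with generic OpenCV ORB features: detect local features on a query word image and on a whole page, then keep only unambiguous matches. The file names, feature count, and ratio threshold are assumptions.

# Generic segmentation-free matching sketch with OpenCV ORB (a stand-in
# for the paper's document-oriented keypoint pipeline).
import cv2

query = cv2.imread("query_word.png", cv2.IMREAD_GRAYSCALE)  # hypothetical path
page = cv2.imread("page.png", cv2.IMREAD_GRAYSCALE)         # hypothetical path

orb = cv2.ORB_create(nfeatures=2000)
kq, dq = orb.detectAndCompute(query, None)
kp, dp = orb.detectAndCompute(page, None)

# Hamming distance suits binary descriptors; Lowe's ratio test prunes
# ambiguous correspondences.
matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
good = [m for m, n in matcher.knnMatch(dq, dp, k=2)
        if m.distance < 0.75 * n.distance]

# Page locations voted by the surviving matches hint at instances of the
# query word; a full service would cluster these votes into word hypotheses.
hits = [kp[m.trainIdx].pt for m in good]
print(len(good), "candidate correspondences on the page")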
2020 (3)
Deep Learning Inference at the Edge for Mobile and Aerial Robotics. Faniadis, E., & Amanatiadis, A. In 2020 IEEE International Symposium on Safety, Security, and Rescue Robotics, SSRR 2020, 2020.
@inproceedings{faniadis2020deep,
  title     = {Deep Learning Inference at the Edge for Mobile and Aerial Robotics},
  author    = {Faniadis, E. and Amanatiadis, A.},
  booktitle = {2020 IEEE International Symposium on Safety, Security, and Rescue Robotics, SSRR 2020},
  year      = {2020},
  doi       = {10.1109/SSRR50563.2020.9292575},
  abstract  = {Deep learning inference is an established element of autonomous robots, especially in the domain of safety, security, and rescue applications. Autonomous functions based on deep learning inference are considered the spearhead of such robots, paving the way for increased demand in onboard computational resources and performance accuracy. Edge computing is steadily improving in terms of computational power, but at the same time artificial neural networks are becoming ever deeper and more complex. To this end, the right selection between the onboard hardware platform and an efficient deep neural network is considered a challenging trade-off in autonomous system design. In this paper, we investigate the current landscape in deep learning inference at the edge by evaluating the requirements, challenges, and available solutions for service-oriented architectures in the safety, security, and rescue domain. Current research directions and best optimization practices are discussed, enriched with computational and accuracy comparisons providing necessary insights for optimal system design.}
}
Social Robots in Special Education: Creating Dynamic Interactions for Optimal Experience. Amanatiadis, A., Kaburlasos, V.G., Dardani, C., Chatzichristofis, S.A., & Mitropoulos, A. IEEE Consumer Electronics Magazine, 9(3). 2020.
@article{amanatiadis2020social,
  title    = {Social Robots in Special Education: Creating Dynamic Interactions for Optimal Experience},
  author   = {Amanatiadis, A. and Kaburlasos, V.G. and Dardani, C. and Chatzichristofis, S.A. and Mitropoulos, A.},
  journal  = {IEEE Consumer Electronics Magazine},
  volume   = {9},
  number   = {3},
  year     = {2020},
  doi      = {10.1109/MCE.2019.2956218},
  abstract = {Recent research has proved the positive therapeutic impacts of robot interaction with children diagnosed with the autism spectrum condition. Until now, most of the evaluated treatment methods apply one-to-one sessions between a single robot and a child. This article explores the potential therapeutic effects of multirobot-assisted therapies, along with a proposed game playlist for pursuing such interactive sessions. Considerations for the therapeutic objectives, such as the improvement in social communication and interaction skills, joint attention, response inhibition, and cognitive flexibility, are also explored. Finally, general procedures and guidelines are provided to further assist the adoption and use of such multirobot sessions.}
}
Educational Robotics: Platforms, Competitions and Expected Learning Outcomes. Evripidou, S., Georgiou, K., Doitsidis, L., Amanatiadis, A., Zinonos, Z., & Chatzichristofis, S.A. IEEE Access, 8. 2020.
@article{evripidou2020educational,
  title    = {Educational Robotics: Platforms, Competitions and Expected Learning Outcomes},
  author   = {Evripidou, S. and Georgiou, K. and Doitsidis, L. and Amanatiadis, A.A. and Zinonos, Z. and Chatzichristofis, S.A.},
  journal  = {IEEE Access},
  volume   = {8},
  year     = {2020},
  keywords = {Educational robotics, educational platforms, educational robotics competitions, educational robotics learning outcomes},
  doi      = {10.1109/ACCESS.2020.3042555},
  abstract = {Motivated by the recent explosion of interest around Educational Robotics (ER), this paper attempts to re-approach this area by suggesting new ways of thinking and exploring the related concepts. The contribution of the paper is fourfold. First, future readers can use this paper as a reference point for exploring the expected learning outcomes of educational robotics. From an exhaustive list of potential learning gains, we propose a set of six learning outcomes that can offer a starting point for a viable model for the design of robotic activities. Second, the paper aims to serve as a survey of the most recent ER platforms. Driven by the growing number of available robotics platforms, we have gathered the most recent ER kits. We also propose a new way to categorize the platforms, free from their manufacturers' vague age boundaries. The proposed categories, namely No Code, Basic Code, and Advanced Code, are derived from the prior knowledge and the programming skills that a student needs to use them efficiently. Third, as the number of ER competitions and tournaments increases in parallel with the number of ER platforms, the paper presents and analyses the most popular robotic events. Robotics competitions encourage participants to develop and showcase their skills while promoting specific learning outcomes. The paper aims to provide an overview of those structures and discuss their efficacy. Finally, the paper explores the educational aspects of the presented ER competitions and their correlation with the six proposed learning outcomes. This raises the question of which primary features compose a competition and achieve its pedagogical goals. To the best of our knowledge, this is the first study that correlates potential learning gains with ER competitions.}
}
2019 (4)
Memristive Circuits for the Simulation of the Earthquake Process. Tastzoglou, G., Ntinas, V., Georgoudas, I.G., Amanatiadis, A., & Sirakoulis, G.C. In 2019 8th International Conference on Modern Circuits and Systems Technologies, MOCAST 2019, 2019.
@inproceedings{tastzoglou2019memristive,
  title     = {Memristive Circuits for the Simulation of the Earthquake Process},
  author    = {Tastzoglou, G. and Ntinas, V. and Georgoudas, I.G. and Amanatiadis, A. and Sirakoulis, G.C.},
  booktitle = {2019 8th International Conference on Modern Circuits and Systems Technologies, MOCAST 2019},
  year      = {2019},
  keywords  = {Circuit, Earthquake, Memristor, Simulation},
  doi       = {10.1109/MOCAST.2019.8742062},
  abstract  = {In this study, a grid that consists of inductor-capacitor-memristor circuits has been developed to simulate earthquake propagation. The main advantage of the memristor device is its ability to remember its last state even when no voltage is applied to it. Due to this feature, the use of the memristor is favored in the proposed inductor-capacitor-memristor (LCM) circuit. The inductors and capacitors emulate the oscillation of the rocks, whereas the memristors account for the circuit's energy loss and act as the memory in which the data affecting the earthquake propagation process are stored. In the context of this study, the proposed circuit model is designed on the LTspice platform. Furthermore, it is tested and validated with real seismic data. Preliminary results are quite encouraging regarding the response of the proposed model.}
}
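To make the inductor-capacitor-memristor idea concrete, here is a toy forward-Euler integration of a single series LC cell whose resistance grows with accumulated charge, standing in for the memristive loss. The grid coupling, the LTspice model, and all parameter values are invented for illustration and are not taken from the paper.

# Toy series L-C loop with a charge-dependent ("memristive") resistance.
import numpy as np

L, C = 1.0, 1.0           # inductance and capacitance (arbitrary units)
R0, k = 0.05, 0.5         # loss that grows with accumulated charge ("memory")
dt, steps = 1e-3, 20000

q, i = 1.0, 0.0           # initial capacitor charge and loop current
history = np.empty(steps)
for t in range(steps):
    R = R0 + k * abs(q)               # state-dependent memristive resistance
    di = (-q / C - R * i) / L         # Kirchhoff voltage law around the loop
    i += dt * di
    q += dt * i
    history[t] = q

print("final charge:", history[-1])   # the oscillation decays through R(q)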
Vehicle Windshield Detection by Fast and Compact Encoder-Decoder FCN Architecture. Mountelos, A., Amanatiadis, A., Sirakoulis, G., & Kosmatopoulos, E.B. In 2019 8th International Conference on Modern Circuits and Systems Technologies, MOCAST 2019, 2019.
@inproceedings{mountelos2019vehicle,
  title     = {Vehicle Windshield Detection by Fast and Compact Encoder-Decoder FCN Architecture},
  author    = {Mountelos, A. and Amanatiadis, A. and Sirakoulis, G. and Kosmatopoulos, E.B.},
  booktitle = {2019 8th International Conference on Modern Circuits and Systems Technologies, MOCAST 2019},
  year      = {2019},
  doi       = {10.1109/MOCAST.2019.8741770},
  abstract  = {Vehicle semantic segmentation is critical in many advanced driving assistance systems, traffic management, and security surveillance systems. Most such systems are deployed on embedded platforms with limited computational resources, located in the vehicles or on remote gantries and roadside poles. While fully convolutional networks have proven to be powerful classifiers, able to make an inference for every single pixel of the input image, they entail high computational costs even during inference. In this paper, a vehicle windshield semantic segmentation method is proposed, utilizing a fast and compact encoder-decoder architecture of a fully convolutional network implemented on a low-power embedded system. The performed qualitative and quantitative performance measurements exemplify a real-time portable embedded solution that is competitive in terms of performance and inference time.}
}
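A minimal sketch of what a compact encoder-decoder FCN that emits a per-pixel mask looks like, assuming PyTorch. The layer sizes and depths are invented for illustration and are not the architecture evaluated in the paper.

# Tiny encoder-decoder FCN sketch for per-pixel binary segmentation
# (e.g., windshield vs. background). Sizes are illustrative only.
import torch
import torch.nn as nn

class TinyFCN(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2),                       # 1/2 resolution
            nn.Conv2d(16, 32, 3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2),                       # 1/4 resolution
        )
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(32, 16, 2, stride=2), nn.ReLU(),
            nn.ConvTranspose2d(16, 1, 2, stride=2),  # back to full size
        )
    def forward(self, x):
        return self.decoder(self.encoder(x))       # per-pixel logits

x = torch.randn(1, 3, 128, 256)                    # dummy road-scene crop
print(TinyFCN()(x).shape)                          # torch.Size([1, 1, 128, 256])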
ViPED: On-road vehicle passenger detection for autonomous vehicles. Amanatiadis, A., Karakasis, E., Bampis, L., Ploumpis, S., & Gasteratos, A. Robotics and Autonomous Systems, 112. 2019.
@article{amanatiadis2019viped,
  title    = {ViPED: On-road vehicle passenger detection for autonomous vehicles},
  author   = {Amanatiadis, A. and Karakasis, E. and Bampis, L. and Ploumpis, S. and Gasteratos, A.},
  journal  = {Robotics and Autonomous Systems},
  volume   = {112},
  year     = {2019},
  keywords = {ADAS, Autonomous vehicles, Driver information systems, Safety systems},
  doi      = {10.1016/j.robot.2018.12.002},
  abstract = {This paper is about detecting and counting the passengers of a tracked vehicle using on-car monocular vision. By having a model of nearby vehicle occupants, intelligent reasoning systems of autonomous cars will be provided with the additional knowledge needed in emergency situations such as those that many philosophers have recently raised. The on-road Vehicle PassengEr Detection (ViPED) system is based on the human perception model in terms of spatio-temporal reasoning, namely the slight movements of passenger shape silhouettes inside the cabin. The main challenges we face are the low light conditions of the cabin (no feature points), the subtle non-rigid motions of the occupants (possible artifactual transitions), and the puzzling discrimination problem of back or front seat occupants (lack of depth information inside the cabin). To overcome these challenges, we first track the detected car windshield and find the optimal affine warp. The registered windshield images are preprocessed in order to extract a feature matrix, which serves as input to a Convolutional Neural Network (CNN) for inferring the number and position of passengers. We demonstrate that our low-cost sensor system is able, in most cases, to successfully detect all the passengers in preceding moving vehicles at various distances and occupancies. Metrics and datasets are included for possible community future work on this new challenging task.}
}
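The registration step described above, aligning consecutive windshield crops with an affine warp before looking for subtle occupant motion, can be sketched with standard OpenCV calls. The tracker, the actual feature-matrix construction, and the CNN head are not reproduced, and the file names and parameters are hypothetical.

# Register consecutive windshield crops and accumulate frame differences
# as a crude motion-energy map (a stand-in for ViPED's feature matrix).
import cv2
import numpy as np

frames = [cv2.imread(f"windshield_{k}.png", cv2.IMREAD_GRAYSCALE)
          for k in range(10)]                     # hypothetical crops

ref = frames[0]
energy = np.zeros_like(ref, dtype=np.float32)
for cur in frames[1:]:
    # Track a few corners from ref to cur to estimate the warp.
    pts0 = cv2.goodFeaturesToTrack(ref, maxCorners=200,
                                   qualityLevel=0.01, minDistance=7)
    pts1, st, _ = cv2.calcOpticalFlowPyrLK(ref, cur, pts0, None)
    ok = st.ravel() == 1
    M, _ = cv2.estimateAffinePartial2D(pts1[ok], pts0[ok])
    aligned = cv2.warpAffine(cur, M, (ref.shape[1], ref.shape[0]))
    energy += cv2.absdiff(aligned, ref).astype(np.float32)

# 'energy' would then feed a small CNN inferring occupant count/position.
print("motion energy range:", energy.min(), energy.max())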
CoMo: a scale and rotation invariant compact composite moment-based descriptor for image retrieval. Vassou, S.A., Anagnostopoulos, N., Christodoulou, K., Amanatiadis, A., & Chatzichristofis, S.A. Multimedia Tools and Applications, 78(3). 2019.
@article{vassou2019como,
  title    = {CoMo: a scale and rotation invariant compact composite moment-based descriptor for image retrieval},
  author   = {Vassou, S.A. and Anagnostopoulos, N. and Christodoulou, K. and Amanatiadis, A. and Chatzichristofis, S.A.},
  journal  = {Multimedia Tools and Applications},
  volume   = {78},
  number   = {3},
  year     = {2019},
  keywords = {Compact composite descriptors, Content based image retrieval, Low level features},
  doi      = {10.1007/s11042-018-5854-3},
  abstract = {Low level features play a significant role in image retrieval. Image moments can effectively represent global information of image content while being invariant under translation, rotation, and scaling. This paper presents CoMo: a moment based composite and compact low-level descriptor that can be used effectively for image retrieval and robot vision tasks. The proposed descriptor is evaluated by employing the Bag-of-Visual-Words representation over various well-known benchmarking image databases. The findings from the experimental evaluation provide strong evidence of high and competitive retrieval performance against various state-of-the-art local descriptors.}
}
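The translation-, rotation-, and scale-invariant image moments that CoMo builds on can be demonstrated with the classic Hu invariants available in OpenCV. This shows the underlying idea only, not the CoMo descriptor itself; the file name is hypothetical.

# Hu moment invariants: seven values that stay (nearly) constant under
# translation, rotation, and scaling of the input image.
import cv2
import numpy as np

img = cv2.imread("image.png", cv2.IMREAD_GRAYSCALE)   # hypothetical path
hu = cv2.HuMoments(cv2.moments(img)).flatten()

# Log-scaling makes the widely ranged invariants comparable in magnitude.
hu_log = -np.sign(hu) * np.log10(np.abs(hu) + 1e-30)
print(hu_log)   # rotating or scaling 'img' barely changes these values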
2018 (6)
Understanding deep convolutional networks through gestalt theory. Amanatiadis, A., Kaburlasos, V.G., & Kosmatopoulos, E.B. In IST 2018 - IEEE International Conference on Imaging Systems and Techniques, Proceedings, 2018.
@inproceedings{amanatiadis2018understanding,
  title     = {Understanding deep convolutional networks through gestalt theory},
  author    = {Amanatiadis, A. and Kaburlasos, V.G. and Kosmatopoulos, E.B.},
  booktitle = {IST 2018 - IEEE International Conference on Imaging Systems and Techniques, Proceedings},
  year      = {2018},
  doi       = {10.1109/IST.2018.8577159},
  abstract  = {The superior performance of deep convolutional networks on high-dimensional problems has made them very popular for several applications. Despite their wide adoption, their underlying mechanisms still remain unclear, with their improvement procedures still relying mainly on a trial-and-error process. We introduce a novel sensitivity analysis based on Gestalt theory for giving insights into the classifier function and intermediate layers. Since Gestalt psychology stipulates that perception can be a product of complex interactions among several elements, we perform an ablation study based on this concept to discover which principles and image context significantly contribute to the network classification. Our results reveal that convnets follow most of the visual cortical perceptual mechanisms defined by the Gestalt principles at several levels. The proposed framework stimulates specific feature maps in classification problems and reveals important network attributes that can produce more explainable network models.}
}
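The ablation-style sensitivity analysis described above can be sketched by zeroing individual feature maps through a forward hook and measuring the change in a class score. The network (an untrained ResNet-18 stand-in), the layer choice, the random input, and the target class are all placeholders, not the paper's Gestalt-driven stimuli or analysis.

# Channel-ablation sensitivity via a PyTorch forward hook.
import torch
import torchvision.models as models

model = models.resnet18().eval()              # untrained stand-in network
x = torch.randn(1, 3, 224, 224)               # dummy input
target = 0                                    # arbitrary class index

with torch.no_grad():
    base = model(x)[0, target].item()

def make_hook(channel):
    def hook(module, inputs, output):
        output[:, channel] = 0                # ablate one feature map
        return output
    return hook

layer = model.layer2                          # arbitrary intermediate layer
for ch in range(4):                           # probe a few channels
    handle = layer.register_forward_hook(make_hook(ch))
    with torch.no_grad():
        drop = base - model(x)[0, target].item()
    handle.remove()
    print(f"channel {ch}: score change {drop:+.4f}")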
Interpolation kernels in fully convolutional networks and their effect in robot vision tasks. Amanatiadis, A., Kaburlasos, V.G., & Kosmatopoulos, E.B. In IST 2018 - IEEE International Conference on Imaging Systems and Techniques, Proceedings, 2018.
@inproceedings{amanatiadis2018interpolation,
  title     = {Interpolation kernels in fully convolutional networks and their effect in robot vision tasks},
  author    = {Amanatiadis, A. and Kaburlasos, V.G. and Kosmatopoulos, E.B.},
  booktitle = {IST 2018 - IEEE International Conference on Imaging Systems and Techniques, Proceedings},
  year      = {2018},
  doi       = {10.1109/IST.2018.8577126},
  abstract  = {Fully convolutional networks have proven to be powerful classifiers, able to make an inference for every single pixel of the input image. One of the key elements of such networks is the up-sampling procedure in the decoder of the network, where the coarse output is connected with the dense pixels by using interpolation kernels in several layers of the decoder. Depending on the selected kernel, the final segmentation accuracy is affected even for fixed-weight kernels. In this paper, various interpolation kernels are evaluated against network accuracy and time performance in several robot vision tasks. The performed quantitative assessments provide several insights for the best trade-offs between competitive performance and inference time.}
}
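The fixed upsampling kernels compared in the paper correspond to standard interpolation modes; a minimal PyTorch sketch upsamples the same coarse feature map with three of them (outside any network, unlike the paper's end-to-end evaluation).

# Upsampling one coarse decoder feature map with different fixed kernels.
import torch
import torch.nn.functional as F

coarse = torch.randn(1, 8, 16, 16)    # dummy coarse feature map (N, C, H, W)

for mode in ("nearest", "bilinear", "bicubic"):
    kwargs = {} if mode == "nearest" else {"align_corners": False}
    up = F.interpolate(coarse, scale_factor=4, mode=mode, **kwargs)
    print(mode, tuple(up.shape))      # each yields (1, 8, 64, 64)

Nearest is cheapest but blocky; bilinear and bicubic cost more per output pixel, which is exactly the accuracy-versus-inference-time trade-off the paper quantifies inside complete networks.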
A LoCATe-based visual place recognition system for mobile robotics and GPGPUs. Bampis, L., Chatzichristofis, S., Iakovidou, C., Amanatiadis, A., Boutalis, Y., & Gasteratos, A. Concurrency and Computation: Practice and Experience, 30(7). 2018.
@article{bampis2018locate,
  title    = {A LoCATe-based visual place recognition system for mobile robotics and GPGPUs},
  author   = {Bampis, L. and Chatzichristofis, S. and Iakovidou, C. and Amanatiadis, A. and Boutalis, Y. and Gasteratos, A.},
  journal  = {Concurrency and Computation: Practice and Experience},
  volume   = {30},
  number   = {7},
  year     = {2018},
  keywords = {GPGPU Computing, Image Description, Mobile Robotics, Visual Place Recognition},
  doi      = {10.1002/cpe.4146},
  abstract = {In this paper, a novel visual Place Recognition approach is evaluated based on a visual vocabulary of the Color and Edge Directivity Descriptor (CEDD) to address the loop closure detection task. Even though CEDD was initially designed to globally describe the color and texture information of an input image for Image Indexing and Retrieval tasks, its suitability for characterizing single feature points has already been proven. Thus, instead of using CEDD as a global descriptor, we adopt a bottom-up approach and use its localized version, the Local Color And Texture dEscriptor (LoCATe), as an input to a state-of-the-art visual Place Recognition technique based on Visual Word Vectors. Also, we use a parallel execution pipeline based on a previous work of ours using well-established General-Purpose Graphics Processing Unit (GPGPU) computing. Our experiments show that the usage of CEDD as a local descriptor produces highly accurate visual Place Recognition results, while the parallelization used allows for a real-time implementation even in the case of a low-cost mobile device.}
}
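A generic bag-of-visual-words sketch of the vocabulary and histogram stage described above: random vectors stand in for LoCATe descriptors (the 144-dimensional size is an assumption), and the vocabulary size is illustrative.

# BoVW: cluster local descriptors into a vocabulary, then describe an
# image as an L2-normalized histogram of visual-word occurrences.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
train_desc = rng.normal(size=(5000, 144))      # stand-in local descriptors
vocab = KMeans(n_clusters=64, n_init=10, random_state=0).fit(train_desc)

def bovw_histogram(desc):
    words = vocab.predict(desc)                # nearest visual word per point
    hist = np.bincount(words, minlength=64).astype(float)
    return hist / (np.linalg.norm(hist) + 1e-12)

h1 = bovw_histogram(rng.normal(size=(300, 144)))
h2 = bovw_histogram(rng.normal(size=(300, 144)))
print("cosine similarity:", float(h1 @ h2))    # place-matching score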
Real-time surveillance detection system for medium-altitude long-endurance unmanned aerial vehicles. Amanatiadis, A., Bampis, L., Karakasis, E.G., Gasteratos, A., & Sirakoulis, G. Concurrency and Computation: Practice and Experience, 30(7). 2018.
@article{amanatiadis2018realtime,
  title    = {Real-time surveillance detection system for medium-altitude long-endurance unmanned aerial vehicles},
  author   = {Amanatiadis, A. and Bampis, L. and Karakasis, E.G. and Gasteratos, A. and Sirakoulis, G.},
  journal  = {Concurrency and Computation: Practice and Experience},
  volume   = {30},
  number   = {7},
  year     = {2018},
  keywords = {aerial surveillance detection system, FPGA, GPGPU},
  doi      = {10.1002/cpe.4145},
  abstract = {The detection of ambiguous objects, although challenging, is of great importance for any surveillance system and especially for an unmanned aerial vehicle, where the measurements are affected by the great observing distance. Wildfire outbursts and illegal migration are only some of the examples that such a system should distinguish and report to the appropriate authorities. Southern European countries, in particular, commonly suffer from these problems due to the mountainous terrain and thick forests they contain. Unmanned aerial vehicles like the one of the "Hellenic Civil Unmanned Air Vehicle" project have been designed to address high-altitude detection tasks and patrol the borders and woodlands for any ambiguous activity. In this paper, a moment-based blob detection approach is proposed that uses the thermal footprint obtained from single infrared images and distinguishes human- or fire-sized and shaped figures. Our method is specifically designed to be appropriately integrated into hardware acceleration devices, such as General-Purpose Graphics Processing Units (GPGPUs) and field-programmable gate arrays, and takes full advantage of their respective parallelization capabilities, achieving real-time performance and energy efficiency. The timing evaluation of the proposed hardware-accelerated algorithm's adaptations shows an achieved speedup of up to 7 times compared to a highly optimized CPU-only version.}
}
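A hedged sketch of the detection idea: threshold a thermal frame and gate connected components by size and a simple upright-shape cue, standing in for the paper's moment-based features. All thresholds are invented, the input path is hypothetical, and the GPGPU/FPGA ports are not shown.

# Thermal blob gating with OpenCV connected components.
import cv2

frame = cv2.imread("thermal.png", cv2.IMREAD_GRAYSCALE)   # hypothetical input
_, hot = cv2.threshold(frame, 200, 255, cv2.THRESH_BINARY)

n, labels, stats, centroids = cv2.connectedComponentsWithStats(hot)
for k in range(1, n):                          # label 0 is the background
    x, y, w, h, area = stats[k]
    if not 30 < area < 2000:                   # size gate (illustrative)
        continue
    aspect = h / float(w)
    if 1.5 < aspect < 4.0:                     # upright, person-like shape
        print("candidate at", tuple(centroids[k]), "area", area)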
Multi-robot engagement in special education: A preliminary study in autism. Kaburlasos, V.G., Dardani, C., Dimitrova, M., & Amanatiadis, A. In 2018 IEEE International Conference on Consumer Electronics, ICCE 2018, 2018.
@inproceedings{kaburlasos2018multirobot,
  title     = {Multi-robot engagement in special education: A preliminary study in autism},
  author    = {Kaburlasos, V.G. and Dardani, C. and Dimitrova, M. and Amanatiadis, A.},
  booktitle = {2018 IEEE International Conference on Consumer Electronics, ICCE 2018},
  volume    = {2018-January},
  year      = {2018},
  doi       = {10.1109/ICCE.2018.8326267},
  abstract  = {An engagement of humanoid social robots in special education may raise ethics questions. Hence, the development of useful therapeutic treatments may be hindered. To circumvent the latter, we propose training a humanoid robot R1 toward simulating the behavior of a child. Then, use another humanoid robot R2 to operate on robot R1 toward developing an effective treatment in special education without raising ethics questions. Experiments here consider children with autism. Preliminary application results have been encouraging. This work also suggests a novel social robot modeling based on gestalt representations.}
}
Fast loop-closure detection using visual-word-vectors from image sequences. Bampis, L., Amanatiadis, A., & Gasteratos, A. International Journal of Robotics Research, 37(1). 2018.
@article{bampis2018fast,
  title    = {Fast loop-closure detection using visual-word-vectors from image sequences},
  author   = {Bampis, L. and Amanatiadis, A. and Gasteratos, A.},
  journal  = {International Journal of Robotics Research},
  volume   = {37},
  number   = {1},
  year     = {2018},
  keywords = {Loop-closure detection, image sequences, low-power embedded systems, mobile robotics, visual SLAM},
  doi      = {10.1177/0278364917740639},
  abstract = {In this paper, a novel pipeline for loop-closure detection is proposed. We base our work on a bag of binary feature words and we produce a description vector capable of characterizing a physical scene as a whole. Instead of relying on single camera measurements, the robot's trajectory is dynamically segmented into image sequences according to its content. The visual word occurrences from each sequence are then combined to create sequence-visual-word-vectors and provide additional information to the matching functionality. In this way, scenes with considerable visual differences are firstly discarded, while the respective image-to-image associations are provided subsequently. With the purpose of further enhancing the system's performance, a novel temporal consistency filter (trained offline) is also introduced to advance matches that persist over time. Evaluation results prove that the presented method compares favorably with other state-of-the-art techniques, while our algorithm is tested on a tablet device, verifying the computational efficiency of the approach.}
}
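The sequence-visual-word-vector idea can be sketched by summing per-image visual-word histograms over a trajectory segment and comparing segments before comparing images. The data below are random stand-ins; the paper's dynamic segmentation and temporal-consistency filter are omitted.

# Sequence-level matching: aggregate per-image word histograms, compare
# whole segments first, and only then check image-to-image pairs.
import numpy as np

rng = np.random.default_rng(1)
vocab_size, seq_len = 256, 8
seq_a = rng.poisson(0.3, size=(seq_len, vocab_size)).astype(float)
seq_b = seq_a + rng.poisson(0.1, size=(seq_len, vocab_size))  # a "revisit"

def seq_vector(word_hists):
    v = word_hists.sum(axis=0)                 # sequence-visual-word-vector
    return v / (np.linalg.norm(v) + 1e-12)

score = seq_vector(seq_a) @ seq_vector(seq_b)
print("sequence match score:", round(float(score), 3))
# Only when this coarse score is high would the individual images of the
# two segments be associated, which is what keeps the pipeline fast.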
2017 (3)
Interactive social robots in special education. Amanatiadis, A., Kaburlasos, V.G., Dardani, C., & Chatzichristofis, S.A. In IEEE International Conference on Consumer Electronics - Berlin, ICCE-Berlin, 2017.
@inproceedings{amanatiadis2017interactive,
  title     = {Interactive social robots in special education},
  author    = {Amanatiadis, A. and Kaburlasos, V.G. and Dardani, C. and Chatzichristofis, S.A.},
  booktitle = {IEEE International Conference on Consumer Electronics - Berlin, ICCE-Berlin},
  volume    = {2017-September},
  year      = {2017},
  keywords  = {Autism, Human-Robot Interaction, Humanoid Robots, Imitation Games, Lattice Computing, Special Education},
  doi       = {10.1109/ICCE-Berlin.2017.8210609},
  abstract  = {This paper presents advances in robot-assisted special education through specially designed social interaction games. The therapeutic objectives include an improvement in the social communication and interaction skills, joint attention, response inhibition, and cognitive flexibility of children diagnosed with Autism Spectrum Condition (ASC). To achieve the aforementioned objectives, imitation games with humanoid robots are implemented. Preliminary application results suggest that robot-assisted treatment can improve children's behavior. Hence, an engagement of humanoid robots in special education is encouraged. Further improvements are planned through computational intelligence techniques toward increasing the humanoid robot's autonomy.}
}
High order visual words for structure-aware and viewpoint-invariant loop closure detection. Bampis, L., Amanatiadis, A., & Gasteratos, A. In IEEE International Conference on Intelligent Robots and Systems, 2017.
@inproceedings{bampis2017high,
  title     = {High order visual words for structure-aware and viewpoint-invariant loop closure detection},
  author    = {Bampis, L. and Amanatiadis, A. and Gasteratos, A.},
  booktitle = {IEEE International Conference on Intelligent Robots and Systems},
  volume    = {2017-September},
  year      = {2017},
  doi       = {10.1109/IROS.2017.8206289},
  abstract  = {In the field of loop closure detection, the most conventional approach is based on the Bag-of-Visual-Words (BoVW) image representation. Although well-established, this model discards the spatial information regarding the local feature points' layout and performs the associations based only on their similarities. In this paper we propose a novel BoVW-based technique which additionally incorporates the operational environment's structure into the description, treating bunches of visual words with similar optical flow measurements as single similarity votes. The presented experimental results prove that our method offers superior loop closure detection accuracy while still ensuring real-time performance, even in the case of a low-power mobile device.}
}
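A sketch of the underlying grouping idea, assuming Lucas-Kanade flow from OpenCV: feature points with similar flow are binned together so that each co-moving bunch would cast a single similarity vote. The binning rule, bin width, and file names are invented, not the paper's scheme.

# Group tracked feature points by coarse optical-flow similarity.
import cv2
import numpy as np

prev_img = cv2.imread("frame0.png", cv2.IMREAD_GRAYSCALE)  # hypothetical
next_img = cv2.imread("frame1.png", cv2.IMREAD_GRAYSCALE)  # hypothetical

pts0 = cv2.goodFeaturesToTrack(prev_img, maxCorners=300,
                               qualityLevel=0.01, minDistance=7)
pts1, st, _ = cv2.calcOpticalFlowPyrLK(prev_img, next_img, pts0, None)
flow = (pts1 - pts0).reshape(-1, 2)[st.ravel() == 1]

# Quantize flow vectors into coarse bins; points sharing a bin form one
# "high order" group and would contribute a single joint vote.
bins = np.round(flow / 5.0).astype(int)
groups = {}
for idx, b in enumerate(map(tuple, bins)):
    groups.setdefault(b, []).append(idx)
print(f"{len(flow)} tracked points -> {len(groups)} joint votes")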
CoMo: A compact composite moment-based descriptor for image retrieval. Vassou, S.A., Anagnostopoulos, N., Amanatiadis, A., Christodoulou, K., & Chatzichristofis, S.A. In ACM International Conference Proceeding Series, 2017.
@inproceedings{vassou2017como,
  title     = {CoMo: A compact composite moment-based descriptor for image retrieval},
  author    = {Vassou, S.A. and Anagnostopoulos, N. and Amanatiadis, A. and Christodoulou, K. and Chatzichristofis, S.A.},
  booktitle = {ACM International Conference Proceeding Series},
  volume    = {Part F1301},
  year      = {2017},
  keywords  = {Compact Composite Descriptors, Content Based Image Retrieval, Low level features},
  doi       = {10.1145/3095713.3095744},
  abstract  = {Low level features play a vital role in image retrieval. Image moments can effectively represent global information of image content while being invariant under translation, rotation, and scaling. This paper briefly presents a moment-based composite and compact low-level descriptor for image retrieval. In order to test the proposed feature, the authors employ the Bag-of-Visual-Words representation to perform experiments on two well-known benchmarking image databases. The robust and highly competitive retrieval performances, reported in all tested diverse collections, verify the promising potential that the proposed descriptor introduces.}
}
2016 (4)
A Multisensor Indoor Localization System for Biped Robots Operating in Industrial Environments. Amanatiadis, A. IEEE Transactions on Industrial Electronics, 63(12). 2016.
@article{amanatiadis2016multisensor,
  title    = {A Multisensor Indoor Localization System for Biped Robots Operating in Industrial Environments},
  author   = {Amanatiadis, A.},
  journal  = {IEEE Transactions on Industrial Electronics},
  volume   = {63},
  number   = {12},
  year     = {2016},
  keywords = {Cellular automata (CA), humanoid robots, localization, multisensor fusion, particle filter},
  doi      = {10.1109/TIE.2016.2590380},
  abstract = {This paper exploits three heterogeneous but complementary technologies, along with a cellular-automata particle filtering technique, for enhancing localization performance. The system is designed to meet challenging indoor operational conditions; thus, different types of humanoid motions and industrial conditions were studied and evaluated according to qualitative attributes. In order to assess the proportional contribution of each navigation subsystem based on its accuracy, the extracted qualitative components are introduced to a particle filtering inference system. The final location estimate is calculated by applying different weights in the resampling stage, depending on each subsystem's accuracy performance at each time step. Experimental and simulation results have shown not only a reduced positioning error in terms of localization effectiveness but also superior performance in several challenging industrial operational conditions. The proposed method offers more accurate, real-time positioning and can be applied to any biped robot, even in challenging operational conditions.}
}
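The accuracy-weighted resampling stage described above can be sketched in a few lines of NumPy. The particles, the per-subsystem reliability scores, and the dummy likelihood are all placeholders rather than the paper's fusion design.

# Accuracy-weighted particle update followed by systematic resampling.
import numpy as np

rng = np.random.default_rng(2)
n = 500
particles = rng.normal(size=(n, 2))           # hypothetical 2-D pose particles
w = np.ones(n) / n                            # uniform prior weights

# Per-subsystem reliability for this time step (invented values); each
# subsystem's (dummy) likelihood is tempered by its reliability score.
reliability = {"vision": 0.7, "imu": 0.9, "radio": 0.4}
for score in reliability.values():
    likelihood = np.exp(-0.5 * (particles ** 2).sum(axis=1))
    w *= likelihood ** score                  # accuracy-weighted update
w /= w.sum()

print("effective sample size:", 1.0 / np.sum(w ** 2))

# Systematic resampling on the weighted particles.
positions = (rng.random() + np.arange(n)) / n
idx = np.searchsorted(np.cumsum(w), positions)
idx = np.minimum(idx, n - 1)                  # guard against float round-off
particles = particles[idx]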
Encoding the description of image sequences: A two-layered pipeline for loop closure detection. Bampis, L., Amanatiadis, A., & Gasteratos, A. In IEEE International Conference on Intelligent Robots and Systems, 2016.
@inproceedings{bampis2016encoding,
  title     = {Encoding the description of image sequences: A two-layered pipeline for loop closure detection},
  author    = {Bampis, L. and Amanatiadis, A. and Gasteratos, A.},
  booktitle = {IEEE International Conference on Intelligent Robots and Systems},
  volume    = {2016-November},
  year      = {2016},
  doi       = {10.1109/IROS.2016.7759667},
  abstract  = {In this paper we propose a novel technique for detecting loop closures on a trajectory by matching sequences of images instead of single instances. We build upon well-established techniques for creating a bag of visual words with a tree structure, and we introduce a significant novelty by extending these notions to describe the visual information of entire regions using Visual-Word-Vectors. The fact that the proposed approach does not rely on a single image to recognize a site allows for a more robust place recognition and, consequently, loop closure detection, while reducing the computational complexity in long-trajectory cases. We present evaluation results for multiple publicly available indoor and outdoor datasets using Precision-Recall curves, which reveal that our method outperforms other state-of-the-art algorithms.}
}
\n
\n\n\n
\n In this paper we propose a novel technique for detecting loop closures on a trajectory by matching sequences of images instead of single instances. We build upon well-established techniques for creating a bag of visual words with a tree structure, and we introduce a significant novelty by extending these notions to describe the visual information of entire regions using Visual-Word-Vectors. The fact that the proposed approach does not rely on a single image to recognize a site allows for more robust place recognition, and consequently loop closure detection, while reducing the computational complexity for long-trajectory cases. We present evaluation results for multiple publicly available indoor and outdoor datasets using Precision-Recall curves, which reveal that our method outperforms other state-of-the-art algorithms.\n
\n\n\n
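As a rough illustration of matching sequences rather than single frames, here is a toy Bag-of-Visual-Words sketch. It uses a flat k-means vocabulary in place of the paper's vocabulary tree and random vectors in place of real local descriptors; the `sequence_vector` aggregation only approximates the Visual-Word-Vector construction.

```python
import numpy as np
from sklearn.cluster import KMeans

# Toy local descriptors per image (stand-ins for SIFT/SURF features).
rng = np.random.default_rng(1)
images = [rng.normal(size=(50, 32)) for _ in range(12)]

# 1) Visual vocabulary (flat k-means here; the paper uses a tree).
vocab = KMeans(n_clusters=16, n_init=3, random_state=0).fit(np.vstack(images))

def bovw(img_desc):
    # Histogram of visual-word occurrences for one image.
    words = vocab.predict(img_desc)
    h = np.bincount(words, minlength=16).astype(float)
    return h / np.linalg.norm(h)

def sequence_vector(seq):
    # Describe a whole trajectory segment by summing member histograms,
    # so matching happens at the sequence level, not per frame.
    v = sum(bovw(d) for d in seq)
    return v / np.linalg.norm(v)

q = sequence_vector(images[0:4])
db = [sequence_vector(images[4:8]), sequence_vector(images[8:12])]
print([float(q @ v) for v in db])  # cosine similarities; max = loop candidate
```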
\n\n\n
\n \n\n \n \n \n \n Autonomous Vehicle Emergency Recovery Tool: A Cooperative Robotic System for Car Extraction.\n \n \n\n\n \n Amanatiadis, A., Charalampous, K., Kostavelis, I., Birkicht, B., Andel, B., Meiser, V., Henschel, C., Baugh, S., Paul, M., May, R., & Gasteratos, A.\n\n\n \n\n\n\n Journal of Field Robotics, 33(8). 2016.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Autonomous Vehicle Emergency Recovery Tool: A Cooperative Robotic System for Car Extraction},\n type = {article},\n year = {2016},\n volume = {33},\n id = {21859a97-01e7-3506-b418-99f4afc93a6d},\n created = {2024-03-30T13:53:01.361Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.361Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {Vehicles have been proven to be an ideal means for terrorists because they can be meticulously prepared well in advance before being deployed in urban and public places. To increase the risk and burden of explosive ordnance disposal teams, third-party vehicles have also been used to block the access path to the explosive loaded vehicle. In this paper, we present a multirobot system that can remove vehicles from confined spaces with delicate handling, swiftly and in any direction to a safer disposal point. The new lifting robots, capable of omnidirectional movement, autonomously underride the identified vehicle and dock to its wheels for a synchronized lifting and extraction. The validity and efficiency of the novel robotic system is illustrated via experiments in an indoor parking lot, demonstrating successful autonomous navigation, docking, lifting, and extraction of a conventional car for a total covered distance of 20 m.},\n bibtype = {article},\n author = {Amanatiadis, A. and Charalampous, K. and Kostavelis, I. and Birkicht, B. and Andel, B. and Meiser, V. and Henschel, C. and Baugh, S. and Paul, M. and May, R. and Gasteratos, A.},\n doi = {10.1002/rob.21593},\n journal = {Journal of Field Robotics},\n number = {8}\n}
\n
\n\n\n
\n Vehicles have proven to be an ideal means for terrorists because they can be meticulously prepared well in advance before being deployed in urban and public places. To increase the risk to and burden on explosive ordnance disposal teams, third-party vehicles have also been used to block the access path to the explosive-loaded vehicle. In this paper, we present a multirobot system that can remove vehicles from confined spaces with delicate handling, swiftly and in any direction, to a safer disposal point. The new lifting robots, capable of omnidirectional movement, autonomously underride the identified vehicle and dock to its wheels for a synchronized lifting and extraction. The validity and efficiency of the novel robotic system is illustrated via experiments in an indoor parking lot, demonstrating successful autonomous navigation, docking, lifting, and extraction of a conventional car over a total covered distance of 20 m.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n A cellular automata based FPGA realization of a new metaheuristic bat-inspired algorithm.\n \n \n\n\n \n Progias, P., Amanatiadis, A., Spataro, W., Trunfio, G., & Sirakoulis, G.\n\n\n \n\n\n\n In AIP Conference Proceedings, volume 1776, 2016. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {A cellular automata based FPGA realization of a new metaheuristic bat-inspired algorithm},\n type = {inproceedings},\n year = {2016},\n volume = {1776},\n id = {9f3d8f87-02b2-3f8e-b2f1-b0ac1f7d6c19},\n created = {2024-03-30T13:53:01.424Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.424Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {Optimization algorithms are often inspired by processes occuring in nature, such as animal behavioral patterns. The main concern with implementing such algorithms in software is the large amounts of processing power they require. In contrast to software code, that can only perform calculations in a serial manner, an implementation in hardware, exploiting the inherent parallelism of single-purpose processors, can prove to be much more efficient both in speed and energy consumption. Furthermore, the use of Cellular Automata (CA) in such an implementation would be efficient both as a model for natural processes, as well as a computational paradigm implemented well on hardware. In this paper, we propose a VHDL implementation of a metaheuristic algorithm inspired by the echolocation behavior of bats. More specifically, the CA model is inspired by the metaheuristic algorithm proposed earlier in the literature, which could be considered at least as efficient than other existing optimization algorithms. The function of the FPGA implementation of our algorithm is explained in full detail and results of our simulations are also demonstrated.},\n bibtype = {inproceedings},\n author = {Progias, P. and Amanatiadis, A.A. and Spataro, W. and Trunfio, G.A. and Sirakoulis, G.Ch.},\n doi = {10.1063/1.4965359},\n booktitle = {AIP Conference Proceedings}\n}
\n
\n\n\n
\n Optimization algorithms are often inspired by processes occurring in nature, such as animal behavioral patterns. The main concern with implementing such algorithms in software is the large amount of processing power they require. In contrast to software code, which can only perform calculations in a serial manner, an implementation in hardware, exploiting the inherent parallelism of single-purpose processors, can prove much more efficient in both speed and energy consumption. Furthermore, the use of Cellular Automata (CA) in such an implementation is efficient both as a model for natural processes and as a computational paradigm that maps well to hardware. In this paper, we propose a VHDL implementation of a metaheuristic algorithm inspired by the echolocation behavior of bats. More specifically, the CA model is inspired by a metaheuristic algorithm proposed earlier in the literature, which can be considered at least as efficient as other existing optimization algorithms. The function of the FPGA implementation of our algorithm is explained in full detail, and simulation results are also presented.\n
\n\n\n
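For readers unfamiliar with the underlying metaheuristic, the following is a minimal software sketch of the standard bat algorithm from the literature (frequency-tuned velocities plus a loudness/pulse-rate gate), written in Python rather than the paper's VHDL/CA formulation; the objective function and parameters are illustrative.

```python
import numpy as np

rng = np.random.default_rng(2)

def sphere(x):                      # toy objective to minimize
    return float(np.sum(x ** 2))

n, dim, f_min, f_max = 20, 2, 0.0, 2.0
x = rng.uniform(-5, 5, (n, dim))    # bat positions
v = np.zeros((n, dim))              # velocities
loud, pulse = 0.9, 0.5              # loudness A and pulse emission rate r
best = x[np.argmin([sphere(b) for b in x])].copy()

for _ in range(200):
    for i in range(n):
        f = f_min + (f_max - f_min) * rng.random()   # frequency tuning
        v[i] += (x[i] - best) * f                    # pull toward the best bat
        cand = x[i] + v[i]
        if rng.random() > pulse:                     # local walk around best
            cand = best + 0.01 * rng.normal(size=dim)
        if sphere(cand) < sphere(x[i]) and rng.random() < loud:
            x[i] = cand                              # accept improved solution
        if sphere(x[i]) < sphere(best):
            best = x[i].copy()
print("minimum found near:", best)                   # close to the origin
```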
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2015\n \n \n (9)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n What, Where and How? Introducing pose manifolds for industrial object manipulation.\n \n \n\n\n \n Kouskouridas, R., Amanatiadis, A., Chatzichristofis, S., & Gasteratos, A.\n\n\n \n\n\n\n Expert Systems with Applications, 42(21). 2015.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {What, Where and How? Introducing pose manifolds for industrial object manipulation},\n type = {article},\n year = {2015},\n keywords = {Object grasping,Object recognition,Ontology-based semantic categorization,Pose estimation},\n volume = {42},\n id = {2d12773b-6cef-3c31-a4ac-7aa756f2aa8d},\n created = {2024-03-30T13:53:01.426Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.426Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {Abstract In this paper we propose a novel method for object grasping that aims to unify robot vision techniques for efficiently accomplishing the demanding task of autonomous object manipulation. Through ontological concepts, we establish three mutually complementary processes that lead to an integrated grasping system able to answer conjunctive queries such as "What", "Where" and "How"? For each query, the appropriate module provides the necessary output based on ontological formalities. The "What" is handled by a state of the art object recognition framework. A novel 6 DoF object pose estimation technique, which entails a bunch-based architecture and a manifold modeling method, answers the "Where". Last, "How" is addressed by an ontology-based semantic categorization enabling the sufficient mapping between visual stimuli and motor commands.},\n bibtype = {article},\n author = {Kouskouridas, R. and Amanatiadis, A. and Chatzichristofis, S.A. and Gasteratos, A.},\n doi = {10.1016/j.eswa.2015.06.039},\n journal = {Expert Systems with Applications},\n number = {21}\n}
\n
\n\n\n
\n In this paper we propose a novel method for object grasping that aims to unify robot vision techniques for efficiently accomplishing the demanding task of autonomous object manipulation. Through ontological concepts, we establish three mutually complementary processes that lead to an integrated grasping system able to answer the conjunctive queries \"What\", \"Where\" and \"How\". For each query, the appropriate module provides the necessary output based on ontological formalities. The \"What\" is handled by a state-of-the-art object recognition framework. A novel 6 DoF object pose estimation technique, which entails a bunch-based architecture and a manifold modeling method, answers the \"Where\". Last, the \"How\" is addressed by an ontology-based semantic categorization enabling a sufficient mapping between visual stimuli and motor commands.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n A stereo matching approach based on particle filters and scattered control landmarks.\n \n \n\n\n \n Ploumpis, S., Amanatiadis, A., & Gasteratos, A.\n\n\n \n\n\n\n Image and Vision Computing, 38. 2015.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {A stereo matching approach based on particle filters and scattered control landmarks},\n type = {article},\n year = {2015},\n keywords = {Ground control points,Markov chains,Particle filters,Plane fitting,Stereo matching},\n volume = {38},\n id = {0ea62e74-281c-3a84-92f9-0acf3313758f},\n created = {2024-03-30T13:53:01.482Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.482Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {In robot localization, particle filtering can estimate the position of a robot in a known environment with the help of sensor data. In this paper, we present an approach based on particle filtering, for accurate stereo matching. The proposed method consists of three parts. First, we utilize multiple disparity maps in order to acquire a very distinctive set of features called landmarks, and then we use segmentation as a grouping technique. Secondly, we apply scan line particle filtering using the corresponding landmarks as a virtual sensor data to estimate the best disparity value. Lastly, we reduce the computational redundancy of particle filtering in our stereo correspondence with a Markov chain model, given the previous scan line values. More precisely, we assist particle filtering convergence by adding a proportional weight in the predicted disparity value estimated by Markov chains. In addition to this, we optimize our results by applying a plane fitting algorithm along with a histogram technique to refine any outliers. This work provides new insights into stereo matching methodologies by taking advantage of global geometrical and spatial information from distinctive landmarks. Experimental results show that our approach is capable of providing high-quality disparity maps comparable to other well-known contemporary techniques.},\n bibtype = {article},\n author = {Ploumpis, S. and Amanatiadis, A. and Gasteratos, A.},\n doi = {10.1016/j.imavis.2015.04.001},\n journal = {Image and Vision Computing}\n}
\n
\n\n\n
\n In robot localization, particle filtering can estimate the position of a robot in a known environment with the help of sensor data. In this paper, we present an approach based on particle filtering for accurate stereo matching. The proposed method consists of three parts. First, we utilize multiple disparity maps in order to acquire a very distinctive set of features called landmarks, and then we use segmentation as a grouping technique. Secondly, we apply scan line particle filtering using the corresponding landmarks as virtual sensor data to estimate the best disparity value. Lastly, we reduce the computational redundancy of particle filtering in our stereo correspondence with a Markov chain model, given the previous scan line values. More precisely, we assist particle filtering convergence by adding a proportional weight to the predicted disparity value estimated by the Markov chains. In addition, we optimize our results by applying a plane fitting algorithm along with a histogram technique to refine any outliers. This work provides new insights into stereo matching methodologies by taking advantage of global geometrical and spatial information from distinctive landmarks. Experimental results show that our approach is capable of providing high-quality disparity maps comparable to those of other well-known contemporary techniques.\n
\n\n\n
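A drastically simplified sketch of the scanline idea follows: the previous pixel's disparity acts as a Markov-style prior that weights the matching cost. It replaces the paper's particle set with an exhaustive per-pixel search and omits the landmark and plane-fitting stages, so it only conveys the flavor of the method.

```python
import numpy as np

def scanline_disparity(left, right, d_max=16, lam=4.0):
    """Per-scanline disparity where the previous pixel's estimate
    penalizes disparity jumps (a crude Markov smoothness prior)."""
    h, w = left.shape
    disp = np.zeros((h, w), dtype=int)
    for y in range(h):
        prev = 0
        for xpix in range(d_max, w):
            # Matching cost + smoothness penalty relative to `prev`.
            costs = np.array([abs(int(left[y, xpix]) - int(right[y, xpix - d]))
                              + lam * abs(d - prev) for d in range(d_max)])
            prev = int(np.argmin(costs))
            disp[y, xpix] = prev
    return disp

rng = np.random.default_rng(3)
right = rng.integers(0, 255, (32, 64)).astype(np.uint8)
left = np.roll(right, 5, axis=1)                    # synthetic 5-pixel shift
print(np.median(scanline_disparity(left, right)))   # recovers 5
```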
\n\n\n
\n \n\n \n \n \n \n AVERT: An autonomous multi-robot system for vehicle extraction and transportation.\n \n \n\n\n \n Amanatiadis, A., Henschel, C., Birkicht, B., Andel, B., Charalampous, K., Kostavelis, I., May, R., & Gasteratos, A.\n\n\n \n\n\n\n In Proceedings - IEEE International Conference on Robotics and Automation, volume 2015-June, 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {AVERT: An autonomous multi-robot system for vehicle extraction and transportation},\n type = {inproceedings},\n year = {2015},\n volume = {2015-June},\n issue = {June},\n id = {8f212960-08a0-3448-8092-3d66c575dc3f},\n created = {2024-03-30T13:53:01.484Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.484Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {This paper presents a multi-robot system for autonomous vehicle extraction and transportation based on the 'a-robot-for-a-wheel' concept. The developed prototype is able to extract vehicles from confined spaces with delicate handling, swiftly and in any direction. The novel lifting robots are capable of omnidirectional movement, thus they can under-ride the desired vehicle and dock to its wheels for a synchronized lifting and extraction. The overall developed system applies reasoning about available trajectory paths, wheel identification, local and undercarriage obstacle detection, in order to fully automate the process. The validity and efficiency of the AVERT robotic system is illustrated via experiments in an indoor parking lot, demonstrating successful autonomous navigation, docking, lifting and transportation of a conventional vehicle.},\n bibtype = {inproceedings},\n author = {Amanatiadis, A. and Henschel, C. and Birkicht, B. and Andel, B. and Charalampous, K. and Kostavelis, I. and May, R. and Gasteratos, A.},\n doi = {10.1109/ICRA.2015.7139411},\n booktitle = {Proceedings - IEEE International Conference on Robotics and Automation}\n}
\n
\n\n\n
\n This paper presents a multi-robot system for autonomous vehicle extraction and transportation based on the 'a-robot-for-a-wheel' concept. The developed prototype is able to extract vehicles from confined spaces with delicate handling, swiftly and in any direction. The novel lifting robots are capable of omnidirectional movement, thus they can under-ride the desired vehicle and dock to its wheels for a synchronized lifting and extraction. The overall developed system applies reasoning about available trajectory paths, wheel identification, local and undercarriage obstacle detection, in order to fully automate the process. The validity and efficiency of the AVERT robotic system is illustrated via experiments in an indoor parking lot, demonstrating successful autonomous navigation, docking, lifting and transportation of a conventional vehicle.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n Image moment invariants as local features for content based image retrieval using the Bag-of-Visual-Words model.\n \n \n\n\n \n Karakasis, E., Amanatiadis, A., Gasteratos, A., & Chatzichristofis, S.\n\n\n \n\n\n\n Pattern Recognition Letters, 55. 2015.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Image moment invariants as local features for content based image retrieval using the Bag-of-Visual-Words model},\n type = {article},\n year = {2015},\n keywords = {Affine moment invariants,Bag of visual words,Content based image retrieval},\n volume = {55},\n id = {46f22170-0a35-3e8b-8677-65c05bbea108},\n created = {2024-03-30T13:53:01.542Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.542Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {This paper presents an image retrieval framework that uses affine image moment invariants as descriptors of local image areas. Detailed feature vectors are generated by feeding the produced moments into a Bag-of-Visual-Words representation. Image moment invariants have been selected for their compact representation of image areas as well as due to their ability to remain unchanged under affine image transformations. Three different setups were examined in order to evaluate and discuss the overall approach. The retrieval results are promising compared with other widely used local descriptors, allowing the proposed framework to serve as a reference point for future image moment local descriptors applied to the general task of content based image retrieval.},\n bibtype = {article},\n author = {Karakasis, E.G. and Amanatiadis, A. and Gasteratos, A. and Chatzichristofis, S.A.},\n doi = {10.1016/j.patrec.2015.01.005},\n journal = {Pattern Recognition Letters}\n}
\n
\n\n\n
\n This paper presents an image retrieval framework that uses affine image moment invariants as descriptors of local image areas. Detailed feature vectors are generated by feeding the produced moments into a Bag-of-Visual-Words representation. Image moment invariants have been selected for their compact representation of image areas as well as due to their ability to remain unchanged under affine image transformations. Three different setups were examined in order to evaluate and discuss the overall approach. The retrieval results are promising compared with other widely used local descriptors, allowing the proposed framework to serve as a reference point for future image moment local descriptors applied to the general task of content based image retrieval.\n
\n\n\n
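The kind of local descriptor the abstract relies on can be made concrete. Below is the first affine moment invariant from the literature, I1 = (mu20*mu02 - mu11^2)/mu00^4, computed from central moments of an image patch; it is only one example of the moment features that would be quantized by a Bag-of-Visual-Words model, not the paper's full feature vector.

```python
import numpy as np

def central_moment(img, p, q):
    # mu_pq about the intensity centroid of the patch.
    y, x = np.mgrid[:img.shape[0], :img.shape[1]]
    m00 = img.sum()
    xc, yc = (x * img).sum() / m00, (y * img).sum() / m00
    return ((x - xc) ** p * (y - yc) ** q * img).sum()

def first_affine_invariant(img):
    # I1 = (mu20 * mu02 - mu11**2) / mu00**4, unchanged under affine maps.
    mu00 = img.sum()
    mu20 = central_moment(img, 2, 0)
    mu02 = central_moment(img, 0, 2)
    mu11 = central_moment(img, 1, 1)
    return (mu20 * mu02 - mu11 ** 2) / mu00 ** 4

rng = np.random.default_rng(4)
patch = rng.random((21, 21))
print(first_affine_invariant(patch))
print(first_affine_invariant(np.rot90(patch)))  # same value after rotation
```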
\n\n\n
\n \n\n \n \n \n \n A neural network-based approach for user experience assessment.\n \n \n\n\n \n Amanatiadis, A., Mitsinis, N., & Maditinos, D.\n\n\n \n\n\n\n Behaviour and Information Technology, 34(3). 2015.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {A neural network-based approach for user experience assessment},\n type = {article},\n year = {2015},\n keywords = {Asymmetry,Experience assessment,Neural networks,Nonlinearity,User satisfaction},\n volume = {34},\n id = {bd70a0c4-8ca4-3879-a0fd-18746d2411ae},\n created = {2024-03-30T13:53:01.549Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.549Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {The objective of this study is to approximate the links between user satisfaction and its determinants without having the restrictions of common statistical procedures such as linearity, symmetry and normality. For this reason, artificial neural networks are utilised and trained with the observations of an extensive survey on user satisfaction with respect to website attributes. Each observation includes evaluations about the performance of 18 specific and 9 general website attributes as well as an evaluation about overall user satisfaction. The analysis results indicate that website attributes present different impacts on satisfaction whereas the relationships found feature both asymmetry and nonlinearity. Finally, function approximation using neural networks is found to be appropriate for estimating such kind of relationships providing valuable information about satisfaction's formation.},\n bibtype = {article},\n author = {Amanatiadis, A. and Mitsinis, N. and Maditinos, D.},\n doi = {10.1080/0144929X.2014.921728},\n journal = {Behaviour and Information Technology},\n number = {3}\n}
\n
\n\n\n
\n The objective of this study is to approximate the links between user satisfaction and its determinants without the restrictions of common statistical procedures such as linearity, symmetry and normality. For this reason, artificial neural networks are utilised and trained with the observations of an extensive survey on user satisfaction with respect to website attributes. Each observation includes evaluations of the performance of 18 specific and 9 general website attributes as well as an evaluation of overall user satisfaction. The analysis results indicate that website attributes have different impacts on satisfaction, whereas the relationships found feature both asymmetry and nonlinearity. Finally, function approximation using neural networks is found to be appropriate for estimating such relationships, providing valuable information about how satisfaction is formed.\n
\n\n\n
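A minimal sketch of the modeling approach: a small feed-forward network regressing satisfaction on attribute ratings, then probed for asymmetric responses. The data is synthetic and the architecture illustrative; only the idea of capturing nonlinear, asymmetric links mirrors the study.

```python
import numpy as np
from sklearn.neural_network import MLPRegressor

# Synthetic stand-in for the survey: 27 attribute ratings -> satisfaction,
# with a deliberately asymmetric, nonlinear ground truth.
rng = np.random.default_rng(5)
X = rng.uniform(1, 7, (500, 27))
y = (3 + 2 * np.tanh(X[:, 0] - 4)               # saturating attribute
     - 1.5 * np.maximum(0, 4 - X[:, 1])         # loss-only (asymmetric) attribute
     + 0.1 * X[:, 2:].sum(axis=1)
     + rng.normal(0, 0.2, 500))

net = MLPRegressor(hidden_layer_sizes=(16,), max_iter=5000,
                   random_state=0).fit(X, y)

# Probe asymmetry: raising vs. lowering attribute 1 around a neutral profile.
base = np.full((1, 27), 4.0)
up, down = base.copy(), base.copy()
up[0, 1], down[0, 1] = 6.0, 2.0
print(net.predict(up) - net.predict(base))    # expected near 0 (no gain)
print(net.predict(down) - net.predict(base))  # expected near -3 (clear loss)
```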
\n\n\n
\n \n\n \n \n \n \n Real-time indexing for large image databases: color and edge directivity descriptor on GPU.\n \n \n\n\n \n Bampis, L., Iakovidou, C., Chatzichristofis, S., Boutalis, Y., & Amanatiadis, A.\n\n\n \n\n\n\n Journal of Supercomputing, 71(3). 2015.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Real-time indexing for large image databases: color and edge directivity descriptor on GPU},\n type = {article},\n year = {2015},\n keywords = {Database indexing,Feature extraction,GPU,Hybrid implementation,Image retrieval},\n volume = {71},\n id = {5f76c84f-1e71-309b-8471-7d0922fe930e},\n created = {2024-03-30T13:53:01.604Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.604Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {In this paper, we focus on implementing the extraction of a well-known low-level image descriptor using the multicore power provided by general-purpose graphic processing units (GPGPUs). The color and edge directivity descriptor, which incorporates both color and texture information achieving a successful trade-off between effectiveness and efficiency, is employed and reassessed for parallel execution. We are motivated by the fact that image/frame indexing should be achieved real time, which in our case means that a system should be capable of indexing a frame or an image as it becomes part of a database (ideally, calculating the descriptor as the images are captured). Two strategies are explored to accelerate the method and bypass resource limitations and architectural constrains. An approach that exclusively uses the GPU together with a hybrid implementation that distributes the computations to both available GPU and CPU resources are proposed. The first approach is strongly based on the compute unified device architecture and excels compared to all other solutions when the GPU resources are abundant. The second implementation suggests a hybrid scheme where the extraction process is split in two sequential stages, allowing the input data (images or video frames) to be pipelined through the central and the graphic processing units. Experimental results were conducted on four different combinations of GPU–CPU technologies in order to highlight the strengths and the weaknesses of all implementations. Real-time indexing is obtained over all computational setups for both GPU-only and Hybrid techniques. An impressive 22 times acceleration is recorded for the GPU-only method. The proposed Hybrid implementation outperforms the GPU-only implementation and becomes the preferred solution when a low-cost setup (i.e., more advanced CPU combined with a relatively weak GPU) is employed.},\n bibtype = {article},\n author = {Bampis, L. and Iakovidou, C. and Chatzichristofis, S.A. and Boutalis, Y.S. and Amanatiadis, A.},\n doi = {10.1007/s11227-014-1343-2},\n journal = {Journal of Supercomputing},\n number = {3}\n}
\n
\n\n\n
\n In this paper, we focus on implementing the extraction of a well-known low-level image descriptor using the multicore power provided by general-purpose graphics processing units (GPGPUs). The color and edge directivity descriptor, which incorporates both color and texture information, achieving a successful trade-off between effectiveness and efficiency, is employed and reassessed for parallel execution. We are motivated by the fact that image/frame indexing should be achieved in real time, which in our case means that a system should be capable of indexing a frame or an image as it becomes part of a database (ideally, calculating the descriptor as the images are captured). Two strategies are explored to accelerate the method and bypass resource limitations and architectural constraints: an approach that exclusively uses the GPU, and a hybrid implementation that distributes the computations to both the available GPU and CPU resources. The first approach is strongly based on the compute unified device architecture and excels over all other solutions when GPU resources are abundant. The second implementation suggests a hybrid scheme where the extraction process is split into two sequential stages, allowing the input data (images or video frames) to be pipelined through the central and graphics processing units. Experiments were conducted on four different combinations of GPU–CPU technologies in order to highlight the strengths and weaknesses of all implementations. Real-time indexing is obtained over all computational setups for both the GPU-only and hybrid techniques. An impressive 22-times acceleration is recorded for the GPU-only method. The proposed hybrid implementation outperforms the GPU-only implementation and becomes the preferred solution when a low-cost setup (i.e., a more advanced CPU combined with a relatively weak GPU) is employed.\n
\n\n\n
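The hybrid scheme's core idea, overlapping two sequential extraction stages, can be mimicked in a few lines. This stand-in uses two Python threads for the "CPU" and "GPU" stages; the stage functions are placeholders, not the CEDD computation itself.

```python
import threading, queue, time

def pipeline(images, stage1, stage2):
    """Two-stage pipeline: while stage2 (the 'GPU' role) processes frame i,
    stage1 (the 'CPU' role) already prepares frame i+1. This overlap is
    what gives a hybrid extractor its throughput advantage."""
    q, out = queue.Queue(maxsize=2), []

    def producer():
        for img in images:
            q.put(stage1(img))
        q.put(None)              # sentinel: no more frames

    t = threading.Thread(target=producer)
    t.start()
    while (item := q.get()) is not None:
        out.append(stage2(item))
    t.join()
    return out

fake_cpu = lambda x: (time.sleep(0.01), x + 1)[1]   # e.g. fuzzy color units
fake_gpu = lambda x: (time.sleep(0.01), x * 2)[1]   # e.g. texture units
print(pipeline(range(5), fake_cpu, fake_gpu))       # [2, 4, 6, 8, 10]
```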
\n\n\n
\n \n\n \n \n \n \n Accelerating single-image super-resolution polynomial regression in mobile devices.\n \n \n\n\n \n Amanatiadis, A., Bampis, L., & Gasteratos, A.\n\n\n \n\n\n\n IEEE Transactions on Consumer Electronics, 61(1). 2015.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Accelerating single-image super-resolution polynomial regression in mobile devices},\n type = {article},\n year = {2015},\n keywords = {Polynomial regression,general-purpose GPUs,hybrid implementation,super-resolution},\n volume = {61},\n id = {69f1b4e6-5db6-32ab-a2ed-50a2bdbc9bc5},\n created = {2024-03-30T13:53:01.608Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.608Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {This paper introduces a new super-resolution algorithm based on machine learning along with a novel hybrid implementation for next generation mobile devices. The proposed super-resolution algorithm entails a two dimensional polynomial regression method using only the input image properties for the learning task. Model selection is applied for defining the optimal degree of polynomial by adopting regularization capability in order to avoid overfitting. Although it is widely believed that machine learning algorithms are not appropriate for real-time implementation, the paper in hand proves that there are indeed specific hypothesis representations that are able to be integrated into real-time mobile applications. With aim to achieve this goal, the increasing GPU employment in modern mobile devices is exploited. More precisely, by utilizing the mobile GPU as a co-processor in a hybrid pipelined implementation, significant performance speedup along with superior quantitative results can be achieved.},\n bibtype = {article},\n author = {Amanatiadis, A. and Bampis, L. and Gasteratos, A.},\n doi = {10.1109/TCE.2015.7064112},\n journal = {IEEE Transactions on Consumer Electronics},\n number = {1}\n}
\n
\n\n\n
\n This paper introduces a new super-resolution algorithm based on machine learning, along with a novel hybrid implementation for next-generation mobile devices. The proposed super-resolution algorithm entails a two-dimensional polynomial regression method using only the input image properties for the learning task. Model selection is applied to define the optimal polynomial degree, adopting regularization in order to avoid overfitting. Although it is widely believed that machine learning algorithms are not appropriate for real-time implementation, the paper at hand shows that there are indeed specific hypothesis representations that can be integrated into real-time mobile applications. To achieve this goal, the increasing GPU presence in modern mobile devices is exploited. More precisely, by utilizing the mobile GPU as a co-processor in a hybrid pipelined implementation, significant performance speedup along with superior quantitative results can be achieved.\n
\n\n\n
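A toy version of regression-as-interpolation: fit a 2D polynomial surface to the low-resolution grid and sample it at subpixel positions. The paper's method is local, learned per image, and degree-selected with regularization; this single global least-squares fit only conveys the idea.

```python
import numpy as np

def poly_upscale(img, factor=2, deg=2):
    """Upscale by fitting one 2D polynomial surface to the low-res grid
    and sampling it densely (a global simplification of the paper's
    locally learned polynomial regression)."""
    h, w = img.shape
    ys, xs = np.mgrid[:h, :w]
    # Design matrix of monomials x^i * y^j with i + j <= deg.
    terms = [(i, j) for i in range(deg + 1) for j in range(deg + 1 - i)]
    A = np.stack([xs.ravel()**i * ys.ravel()**j for i, j in terms], axis=1)
    coef, *_ = np.linalg.lstsq(A, img.ravel(), rcond=None)
    # Evaluate the fitted surface on a grid `factor` times denser.
    yq, xq = np.mgrid[:h * factor, :w * factor] / factor
    Aq = np.stack([xq.ravel()**i * yq.ravel()**j for i, j in terms], axis=1)
    return (Aq @ coef).reshape(h * factor, w * factor)

lo = np.add.outer(np.arange(8.0), np.arange(8.0))  # smooth test ramp
print(poly_upscale(lo).shape)                      # (16, 16)
```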
\n\n\n
\n \n\n \n \n \n \n Color and Edge Directivity descriptor on GPGPU.\n \n \n\n\n \n Iakovidou, C., Bampis, L., Chatzichristofis, S., Boutalis, Y., & Amanatiadis, A.\n\n\n \n\n\n\n In Proceedings - 23rd Euromicro International Conference on Parallel, Distributed, and Network-Based Processing, PDP 2015, 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Color and Edge Directivity descriptor on GPGPU},\n type = {inproceedings},\n year = {2015},\n id = {f0335116-dd8e-333a-bb4b-8a0fe1c55adb},\n created = {2024-03-30T13:53:01.661Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.661Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {Image indexing refers to describing the visual multimedia content of a medium, using high level textual information or/and low level descriptors. In most cases, images and videos are associated with noisy and incomplete usersupplied textual annotations, possibly due to omission or the excessive cost associated with the metadata creation. In such cases, Content Based Image Retrieval (CBIR) approaches are adopted and low level image features are employed for indexing and retrieval. We employ the Colour and Edge Directivity Descriptor (CEDD), which incorporates both colour and texture information in a compact representation and reassess it for parallel execution, utilizing the multicore power provided by General Purpose Graphic Processing Units (GPGPUs). Experiments conducted on four different combinations of GPU-CPU technologies revealed an impressive gained acceleration when using a GPU, which was up to 22 times faster compared to the respective CPU implementation, while real-time indexing was achieved for all tested GPU models.},\n bibtype = {inproceedings},\n author = {Iakovidou, C. and Bampis, L. and Chatzichristofis, S.A. and Boutalis, Y.S. and Amanatiadis, A.},\n doi = {10.1109/PDP.2015.105},\n booktitle = {Proceedings - 23rd Euromicro International Conference on Parallel, Distributed, and Network-Based Processing, PDP 2015}\n}
\n
\n\n\n
\n Image indexing refers to describing the visual multimedia content of a medium using high-level textual information and/or low-level descriptors. In most cases, images and videos are associated with noisy and incomplete user-supplied textual annotations, possibly due to omission or the excessive cost associated with metadata creation. In such cases, Content Based Image Retrieval (CBIR) approaches are adopted and low-level image features are employed for indexing and retrieval. We employ the Colour and Edge Directivity Descriptor (CEDD), which incorporates both colour and texture information in a compact representation, and reassess it for parallel execution, utilizing the multicore power provided by General Purpose Graphic Processing Units (GPGPUs). Experiments conducted on four different combinations of GPU-CPU technologies revealed an impressive acceleration when using a GPU, which was up to 22 times faster than the respective CPU implementation, while real-time indexing was achieved for all tested GPU models.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n Can speedup assist accuracy? An on-board gpu-accelerated image georeference method for UAVs.\n \n \n\n\n \n Bampis, L., Karakasis, E., Amanatiadis, A., & Gasteratos, A.\n\n\n \n\n\n\n Volume 9163 2015.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@book{\n title = {Can speedup assist accuracy? An on-board gpu-accelerated image georeference method for UAVs},\n type = {book},\n year = {2015},\n source = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},\n volume = {9163},\n id = {3e1e1ede-2eb4-3281-a410-021568e4e109},\n created = {2024-03-30T13:53:01.672Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.672Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {This paper presents a georeferenced map extraction method, for Medium-Altitude Long-Endurance UAVs. The adopted technique of projecting world points to an image plane is a perfect candidate for a GPU implementation. The achieved high frame rate leads to a plethora of measurements even in the case of a low-power mobile processing unit. These measurements can later be combined in order to refine the output and create a more accurate result.},\n bibtype = {book},\n author = {Bampis, L. and Karakasis, E.G. and Amanatiadis, A. and Gasteratos, A.},\n doi = {10.1007/978-3-319-20904-3_10}\n}
\n
\n\n\n
\n This paper presents a georeferenced map extraction method for Medium-Altitude Long-Endurance UAVs. The adopted technique of projecting world points onto an image plane is a perfect candidate for a GPU implementation. The achieved high frame rate yields a plethora of measurements even in the case of a low-power mobile processing unit. These measurements can later be combined in order to refine the output and create a more accurate result.\n
\n\n\n
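The projection step the abstract calls a perfect GPU candidate is the ordinary pinhole camera model. A minimal NumPy version follows; the intrinsics, pose, and terrain points are made-up numbers, and each point is processed independently, which is exactly what makes the kernel parallel-friendly.

```python
import numpy as np

def project(points_w, K, R, t):
    """Pinhole projection of world points into the image plane:
    world -> camera frame, then perspective divide. Each point is
    independent, so the loop maps trivially to GPU threads."""
    pc = R @ points_w.T + t[:, None]   # world -> camera coordinates
    uv = K @ pc                        # apply camera intrinsics
    return (uv[:2] / uv[2]).T          # perspective divide -> pixels

K = np.array([[800., 0, 320],          # illustrative intrinsics
              [0, 800., 240],
              [0, 0, 1]])
R, t = np.eye(3), np.array([0., 0., 100.])   # camera 100 m above ground
ground = np.array([[10., 5., 0.],            # terrain points (x, y, z)
                   [-20., 8., 0.]])
print(project(ground, K, R, t))              # pixel coordinates
```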
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2014\n \n \n (7)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n Digital elevation model fusion using spectral methods.\n \n \n\n\n \n Karakasis, E., Bampis, L., Amanatiadis, A., Gasteratos, A., & Tsalides, P.\n\n\n \n\n\n\n In IST 2014 - 2014 IEEE International Conference on Imaging Systems and Techniques, Proceedings, 2014. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Digital elevation model fusion using spectral methods},\n type = {inproceedings},\n year = {2014},\n id = {6897074a-fef3-394e-a1a9-5bb57d647160},\n created = {2024-03-30T13:53:01.724Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.724Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {This paper presents the application of different spectral methods, like Fourier series and polynomial-based expansions, to Digital Elevation Models (DEMs) in order to fuse their content. Two different fusion techniques: 1) a filter-based one and 2) a weighted average of expansion coefficients, are examined. Their performance is evaluated by using both ground-truth lidar data as well as fusion quality measures. The results point out that polynomial-based spectral expansions perform better than the traditional Fourier approach.},\n bibtype = {inproceedings},\n author = {Karakasis, E.G. and Bampis, L. and Amanatiadis, A. and Gasteratos, A. and Tsalides, P.},\n doi = {10.1109/IST.2014.6958501},\n booktitle = {IST 2014 - 2014 IEEE International Conference on Imaging Systems and Techniques, Proceedings}\n}
\n
\n\n\n
\n This paper presents the application of different spectral methods, such as Fourier series and polynomial-based expansions, to Digital Elevation Models (DEMs) in order to fuse their content. Two different fusion techniques are examined: 1) a filter-based one and 2) a weighted average of expansion coefficients. Their performance is evaluated using both ground-truth lidar data and fusion quality measures. The results point out that polynomial-based spectral expansions perform better than the traditional Fourier approach.\n
\n\n\n
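Technique 2), the weighted average of expansion coefficients, is straightforward to sketch. The version below uses a 2D DCT as the spectral basis purely for illustration (the paper compares Fourier and polynomial-based expansions); the weights and test terrains are synthetic.

```python
import numpy as np
from scipy.fft import dctn, idctn

def fuse_dems(dem_a, dem_b, w=0.5):
    """Fuse two elevation grids by a weighted average of their spectral
    (here DCT) expansion coefficients, then invert back to elevations."""
    Ca, Cb = dctn(dem_a, norm='ortho'), dctn(dem_b, norm='ortho')
    return idctn(w * Ca + (1 - w) * Cb, norm='ortho')

rng = np.random.default_rng(6)
terrain = np.add.outer(np.linspace(0, 50, 64), np.linspace(0, 30, 64))
dem_a = terrain + rng.normal(0, 2.0, terrain.shape)   # coarse, noisy source
dem_b = terrain + rng.normal(0, 1.0, terrain.shape)   # finer source
fused = fuse_dems(dem_a, dem_b, w=0.3)
print(float(np.abs(fused - terrain).mean()))          # below either input's error
```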
\n\n\n
\n \n\n \n \n \n \n Two-staged image colorization based on salient contours.\n \n \n\n\n \n Anagnostopoulos, N., Iakovidou, C., Amanatiadis, A., Boutalis, Y., & Chatzichristofis, S.\n\n\n \n\n\n\n In IST 2014 - 2014 IEEE International Conference on Imaging Systems and Techniques, Proceedings, 2014. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Two-staged image colorization based on salient contours},\n type = {inproceedings},\n year = {2014},\n id = {6176cf6b-327d-3afa-ba6e-cc1335d1945a},\n created = {2024-03-30T13:53:01.731Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.731Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {In this paper we present a novel colorization technique that manages to significantly reduce color bleeding artifacts caused by weak object boundaries and also requires only abstract color indications and placement from the user. It is essentially a two-staged color propagation algorithm. Guided by the extracted salient contours of the image, we roughly mark and divide the image in two differently treated image area categories: Homogeneous color areas of high confidence and critical attention-needing areas of edges and region boundaries. The method was tested with user drawn scribble images, but can be easily adopted by image exemplars employing techniques, as well.},\n bibtype = {inproceedings},\n author = {Anagnostopoulos, N. and Iakovidou, C. and Amanatiadis, A. and Boutalis, Y. and Chatzichristofis, S.A.},\n doi = {10.1109/IST.2014.6958509},\n booktitle = {IST 2014 - 2014 IEEE International Conference on Imaging Systems and Techniques, Proceedings}\n}
\n
\n\n\n
\n In this paper we present a novel colorization technique that manages to significantly reduce color bleeding artifacts caused by weak object boundaries and requires only abstract color indications and placement from the user. It is essentially a two-staged color propagation algorithm. Guided by the extracted salient contours of the image, we roughly mark and divide the image into two differently treated image area categories: homogeneous color areas of high confidence, and critical, attention-needing areas of edges and region boundaries. The method was tested with user-drawn scribble images, but can easily be adapted by techniques employing image exemplars as well.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n Identification and retrieval of DNA genomes using binary image representations produced by cellular automata.\n \n \n\n\n \n Konstantinidis, K., Amanatiadis, A., Chatzichristofis, S., Sandaltzopoulos, R., & Sirakoulis, G.\n\n\n \n\n\n\n In IST 2014 - 2014 IEEE International Conference on Imaging Systems and Techniques, Proceedings, 2014. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Identification and retrieval of DNA genomes using binary image representations produced by cellular automata},\n type = {inproceedings},\n year = {2014},\n id = {87d279f6-0b2c-3cef-b7f0-444c7fbb8699},\n created = {2024-03-30T13:53:01.779Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.779Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {We have developed a novel method for the identification and retrieval of DNA sequences which are represented as binary images. This type of representation emanates from the evolution of one-dimensional nucleotide arrays abiding to a set of Cellular Automaton rules. A thorough investigation of these rules was performed in order to determine their efficiency. The presented method has been applied on short nucleotide sequences as well as on eleven complete genes of various origins. The technology presented offers a novel approach for the rapid and efficient sequence identification of nucleotide sequences in database repositories. The proposed framework will be practically useful for applications involved in virus recognition and personalized medicine which rely heavily on the processing of huge volumes of nucleotide sequence data.},\n bibtype = {inproceedings},\n author = {Konstantinidis, K. and Amanatiadis, A. and Chatzichristofis, S.A. and Sandaltzopoulos, R. and Sirakoulis, G.Ch.},\n doi = {10.1109/IST.2014.6958460},\n booktitle = {IST 2014 - 2014 IEEE International Conference on Imaging Systems and Techniques, Proceedings}\n}
\n
\n\n\n
\n We have developed a novel method for the identification and retrieval of DNA sequences which are represented as binary images. This type of representation emanates from the evolution of one-dimensional nucleotide arrays abiding to a set of Cellular Automaton rules. A thorough investigation of these rules was performed in order to determine their efficiency. The presented method has been applied on short nucleotide sequences as well as on eleven complete genes of various origins. The technology presented offers a novel approach for the rapid and efficient sequence identification of nucleotide sequences in database repositories. The proposed framework will be practically useful for applications involved in virus recognition and personalized medicine which rely heavily on the processing of huge volumes of nucleotide sequence data.\n
\n\n\n
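To make the representation concrete, here is one possible encoding: seed a one-dimensional CA with purine/pyrimidine bits derived from the sequence and stack the generations into a binary image. Both the bit coding and the use of Rule 90 are illustrative choices; the paper investigates which CA rules are actually effective.

```python
import numpy as np

# Rule 90 lookup: new cell = left neighbor XOR right neighbor.
RULE90 = {(1,1,1): 0, (1,1,0): 1, (1,0,1): 0, (1,0,0): 1,
          (0,1,1): 1, (0,1,0): 0, (0,0,1): 1, (0,0,0): 0}

def dna_to_image(seq, steps=32):
    """Evolve a 1D cellular automaton seeded by a nucleotide sequence
    (purines A/G -> 1, pyrimidines C/T -> 0) and stack the generations
    into a binary image that can serve as a sequence fingerprint."""
    row = np.array([1 if b in 'AG' else 0 for b in seq], dtype=np.uint8)
    img = [row]
    for _ in range(steps - 1):
        padded = np.pad(row, 1)   # zero boundary condition
        row = np.array([RULE90[tuple(padded[i:i + 3])]
                        for i in range(len(row))], dtype=np.uint8)
        img.append(row)
    return np.array(img)

img = dna_to_image("ACGTTGCAACGTAGCTAGGCTAACGGTTACAG")
print(img.shape, int(img.sum()))  # 32x32 binary fingerprint of the sequence
```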
\n\n\n
\n \n\n \n \n \n \n How smart are smartphones?: Bridging the marketing and information technology gap.\n \n \n\n\n \n Amanatiadis, A., & Chatzichristofis, S.\n\n\n \n\n\n\n IEEE Consumer Electronics Magazine, 3(4). 2014.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {How smart are smartphones?: Bridging the marketing and information technology gap.},\n type = {article},\n year = {2014},\n volume = {3},\n id = {cfc70796-0c62-3360-a0dd-ac6bce4ac62c},\n created = {2024-03-30T13:53:01.787Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.787Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {The term "smart" has become widespread in consumer electronics in recent years, reflecting the consumers' need for devices that assist them in their daily activities. The term has a long history of usage in marketing science as one of the most appealing ways of promoting or advertising a product, brand, or service. However, even today, there is much controversy in the definition of this term and even more ambiguities for the right use in consumer electronic devices. Furthermore, it is not possible to carry out any quantitative or qualitative analysis of how smart a device is without having some adequate conception of what a smart or intelligent application means. This article attempts to explore the smart and intelligent capabilities of the current and next-generation consumer devices by investigating certain propositions and arguments along with the current trends and future directions in information technology (IT).},\n bibtype = {article},\n author = {Amanatiadis, A. and Chatzichristofis, S.A.},\n doi = {10.1109/MCE.2014.2340053},\n journal = {IEEE Consumer Electronics Magazine},\n number = {4}\n}
\n
\n\n\n
\n The term \"smart\" has become widespread in consumer electronics in recent years, reflecting consumers' need for devices that assist them in their daily activities. The term has a long history of usage in marketing science as one of the most appealing ways of promoting or advertising a product, brand, or service. However, even today, there is much controversy over the definition of this term and even more ambiguity about its proper use for consumer electronic devices. Furthermore, it is not possible to carry out any quantitative or qualitative analysis of how smart a device is without some adequate conception of what a smart or intelligent application means. This article attempts to explore the smart and intelligent capabilities of current and next-generation consumer devices by investigating certain propositions and arguments along with the current trends and future directions in information technology (IT).\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n Real-time robot path planning for dynamic obstacle avoidance.\n \n \n\n\n \n Charalampous, K., Kostavelis, I., Amanatiadis, A., & Gasteratos, A.\n\n\n \n\n\n\n Journal of Cellular Automata, 9(2-3). 2014.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Real-time robot path planning for dynamic obstacle avoidance},\n type = {article},\n year = {2014},\n keywords = {3D point cloud,Cellular automata,Dynamic obstacles,Laser scanner,Obstacle avoidance,Robot path planning},\n volume = {9},\n id = {89d4fcf2-3812-3816-91e4-220cb38449e0},\n created = {2024-03-30T13:53:01.858Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.858Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {In this paper we present a method based on Cellular Automata (CA) rules, suitable for path planning in dynamically changing environments. The algorithm underlaying this method is the A* search one in combination with CAs, the discrete nature of which renders the method appropriate for both robot and obstacle state spaces. Moreover, the finite properties of the A* algorithm were amalgamated with the CA rules to built up a substantial search strategy. The proposed algorithm assures a collision-free cost-efficient path to target with optimal computational cost. The algorithm’s main attribute is that it expands the map state space with respect to time using adaptive time intervals to predict the potential expansion of obstacles, assuring a safe and minimum cost path. The proposed method has been examined in real world planar environments and exhibits remarkable performance, thus it can be applied to any arbitrary shaped obstacle.},\n bibtype = {article},\n author = {Charalampous, K. and Kostavelis, I. and Amanatiadis, A. and Gasteratos, A.},\n journal = {Journal of Cellular Automata},\n number = {2-3}\n}
\n
\n\n\n
\n In this paper we present a method based on Cellular Automata (CA) rules, suitable for path planning in dynamically changing environments. The algorithm underlying this method is the A* search algorithm in combination with CAs, whose discrete nature renders the method appropriate for both robot and obstacle state spaces. Moreover, the finite properties of the A* algorithm were amalgamated with the CA rules to build up a substantial search strategy. The proposed algorithm assures a collision-free, cost-efficient path to the target with optimal computational cost. The algorithm's main attribute is that it expands the map state space with respect to time, using adaptive time intervals to predict the potential expansion of obstacles, assuring a safe and minimum-cost path. The proposed method has been examined in real-world planar environments and exhibits remarkable performance; thus it can be applied to any arbitrarily shaped obstacle.\n
\n\n\n
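For reference, the A* core that the method builds on looks like this on a 4-connected occupancy grid. The sketch keeps obstacles static; the paper's contribution, growing predicted obstacle regions over time with CA rules and adaptive time intervals, is deliberately omitted here.

```python
import heapq

def astar(grid, start, goal):
    """Plain A* over a 4-connected occupancy grid with a Manhattan
    heuristic. Cells with value 1 are occupied; unit step cost."""
    h = lambda p: abs(p[0] - goal[0]) + abs(p[1] - goal[1])
    openq, came, g = [(h(start), start)], {}, {start: 0}
    while openq:
        _, cur = heapq.heappop(openq)
        if cur == goal:                       # reconstruct path backwards
            path = [cur]
            while cur in came:
                cur = came[cur]
                path.append(cur)
            return path[::-1]
        for d in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nxt = (cur[0] + d[0], cur[1] + d[1])
            if not (0 <= nxt[0] < len(grid) and 0 <= nxt[1] < len(grid[0])):
                continue
            if grid[nxt[0]][nxt[1]]:          # occupied cell
                continue
            if g[cur] + 1 < g.get(nxt, float("inf")):
                g[nxt] = g[cur] + 1
                came[nxt] = cur
                heapq.heappush(openq, (g[nxt] + h(nxt), nxt))
    return None                                # no path exists

grid = [[0] * 6 for _ in range(6)]
grid[2][1:5] = [1] * 4                         # a wall across row 2
print(astar(grid, (0, 0), (5, 5)))             # routes around the wall
```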
\n\n\n
\n \n\n \n \n \n \n The HCUAV project: Electronics and software development for medium altitude remote sensing.\n \n \n\n\n \n Amanatiadis, A., Karakasis, E., Bampis, L., Giitsidis, T., Panagiotou, P., Sirakoulis, G., Gasteratos, A., Tsalides, P., Goulas, A., & Yakinthos, K.\n\n\n \n\n\n\n In 12th IEEE International Symposium on Safety, Security and Rescue Robotics, SSRR 2014 - Symposium Proceedings, 2014. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {The HCUAV project: Electronics and software development for medium altitude remote sensing},\n type = {inproceedings},\n year = {2014},\n id = {5b19aa14-411e-3189-9393-cbadcf8a0f55},\n created = {2024-03-30T13:53:01.861Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.861Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {The continuous increase of illegal migration flows to southern European countries has been recently in the spotlight of European Union due to numerous deadly incidents. Another common issue that the aforementioned countries share is the Mediterranean wildfires which are becoming more frequent due to the warming climate and increasing magnitudes of droughts. Different ground early warning systems have been funded and developed across these countries separately for these incidents, however they have been proved insufficient mainly because of the limited surveyed areas and challenging Mediterranean shoreline and landscape. In 2011, the Greek Government along with European Commission, decided to support the development of the first Hellenic Civil Unmanned Aerial Vehicle (HCUAV), which will provide solutions to both illegal migration and wildfires. This paper presents the challenges in the electronics and software design, and especially the under development solutions for detection of human and fire activity, image mosaicking and orthorectification using commercial off-the-shelf sensors. Preliminary experimental results of the HCUAV medium altitude remote sensing algorithms, show accurate and adequate results using low cost sensors and electronic devices.},\n bibtype = {inproceedings},\n author = {Amanatiadis, A. and Karakasis, E.G. and Bampis, L. and Giitsidis, T. and Panagiotou, P. and Sirakoulis, G.C. and Gasteratos, A. and Tsalides, P. and Goulas, A. and Yakinthos, K.},\n doi = {10.1109/SSRR.2014.7017668},\n booktitle = {12th IEEE International Symposium on Safety, Security and Rescue Robotics, SSRR 2014 - Symposium Proceedings}\n}
\n
\n\n\n
\n The continuous increase of illegal migration flows to southern European countries has recently been in the spotlight of the European Union due to numerous deadly incidents. Another common issue that the aforementioned countries share is Mediterranean wildfires, which are becoming more frequent due to the warming climate and increasing magnitudes of droughts. Different ground early warning systems have been funded and developed across these countries separately for these incidents; however, they have proven insufficient, mainly because of the limited surveyed areas and the challenging Mediterranean shoreline and landscape. In 2011, the Greek Government, along with the European Commission, decided to support the development of the first Hellenic Civil Unmanned Aerial Vehicle (HCUAV), which will provide solutions to both illegal migration and wildfires. This paper presents the challenges in the electronics and software design, and especially the solutions under development for the detection of human and fire activity, image mosaicking and orthorectification using commercial off-the-shelf sensors. Preliminary experimental results of the HCUAV medium altitude remote sensing algorithms show accurate and adequate results using low-cost sensors and electronic devices.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n Accelerating image super-resolution regression by a hybrid implementation in mobile devices.\n \n \n\n\n \n Amanatiadis, A., Bampis, L., & Gasteratos, A.\n\n\n \n\n\n\n In Digest of Technical Papers - IEEE International Conference on Consumer Electronics, 2014. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Accelerating image super-resolution regression by a hybrid implementation in mobile devices},\n type = {inproceedings},\n year = {2014},\n id = {e7cfafac-aba2-327d-bb4c-50837bc184ff},\n created = {2024-03-30T13:53:01.918Z},\n file_attached = {false},\n profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},\n last_modified = {2024-03-30T13:53:01.918Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {true},\n abstract = {This paper introduces a new super-resolution algorithm based on machine learning along with a novel hybrid implementation for next generation mobile devices. The proposed super-resolution algorithm entails a multivariate polynomial regression method using only the input image properties for the learning task. Although it is widely believed that machine learning algorithms are not appropriate for real-time implementation, the paper in hand proves that there are indeed specific hypothesis representations that are able to be integrated into real-time mobile applications. With aim to achieve this goal, we take advantage of the increasing GPU employment in modern mobile devices. More precisely, we utilize the mobile GPU as a co-processor in a hybrid pipelined implementation achieving significant performance speedup along with superior quantitative interpolation results. © 2014 IEEE.},\n bibtype = {inproceedings},\n author = {Amanatiadis, A. and Bampis, L. and Gasteratos, A.},\n doi = {10.1109/ICCE.2014.6776029},\n booktitle = {Digest of Technical Papers - IEEE International Conference on Consumer Electronics}\n}
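The regression at the heart of the abstract above can be illustrated with a few lines of scikit-learn. This is a minimal sketch rather than the paper's formulation: the 3x3 neighbourhood features, the polynomial degree and the synthetic training pair are assumptions made here for demonstration.

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

def patches(img, r=1):
    # Flattened (2r+1)x(2r+1) neighbourhoods around every interior pixel.
    h, w = img.shape
    return np.array([img[y - r:y + r + 1, x - r:x + r + 1].ravel()
                     for y in range(r, h - r) for x in range(r, w - r)])

rng = np.random.default_rng(0)
low = rng.random((32, 32))                 # stand-in for a coarse upscaled image
high = low + 0.05 * rng.random((32, 32))   # stand-in for ground-truth detail

poly = PolynomialFeatures(degree=2)        # multivariate polynomial hypothesis
X = poly.fit_transform(patches(low))
y = patches(high)[:, 4]                    # centre pixel of each 3x3 patch
model = LinearRegression().fit(X, y)       # closed-form least squares

detail = model.predict(poly.transform(patches(low)))

A closed-form regressor of this kind is what makes a real-time, GPU-pipelined implementation plausible: inference reduces to one dot product per pixel.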
2013 (3)
The AVERT project: Autonomous Vehicle Emergency Recovery Tool. Amanatiadis, A., Charalampous, K., Kostavelis, I., Gasteratos, A., Birkicht, B., Braunstein, J., Meiser, V., Henschel, C., Baugh, S., Paul, M., & May, R. In 2013 IEEE International Symposium on Safety, Security, and Rescue Robotics, SSRR 2013, 2013.
@inproceedings{
  title = {The AVERT project: Autonomous Vehicle Emergency Recovery Tool},
  type = {inproceedings},
  year = {2013},
  id = {95c293e1-705f-3731-a921-85632b6114ce},
  created = {2024-03-30T13:53:01.927Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:01.927Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  abstract = {Terrorism threatens horrific loss of life, extensive disruption to city transport and damage to commercial real estate. Vehicles provide an ideal delivery mechanism because they can be meticulously prepared well in advance of deployment and then brought into the area of operations. Furthermore, a real and present danger comes from the threat of Chemical, Radiological, Biological and Nuclear (CRBN) contamination. Current methods of bomb disruption and neutralisation are hindered in the event that the device is shielded, blocked or for whatever reason cannot be accessed for examination. The Autonomous Vehicle Emergency Recovery Tool (AVERT) project introduces a unique capability to Police and Armed Services to rapidly deploy, extract and remove blocking vehicles from vulnerable positions such as enclosed infrastructure spaces, tunnels, low bridges as well as under-building and underground car parks. Within the AVERT project, vehicles can be removed from confined spaces with delicate handling, swiftly and in any direction to a safer disposal point to reduce or eliminate collateral damage to infrastructure and personnel. The overall system will be commanded remotely and shall operate autonomously under its own power and sensor awareness, as a critical tool alongside existing technologies, thereby enhancing bomb disposal response speed and safety. © 2013 IEEE.},
  bibtype = {inproceedings},
  author = {Amanatiadis, A. and Charalampous, K. and Kostavelis, I. and Gasteratos, A. and Birkicht, B. and Braunstein, J. and Meiser, V. and Henschel, C. and Baugh, S. and Paul, M. and May, R.},
  doi = {10.1109/SSRR.2013.6719342},
  booktitle = {2013 IEEE International Symposium on Safety, Security, and Rescue Robotics, SSRR 2013}
}

How do you help a robot to find a place? A supervised learning paradigm to semantically infer about places. Kostavelis, I., Amanatiadis, A., & Gasteratos, A. Volume 8073 LNAI, 2013.
@book{
  title = {How do you help a robot to find a place? A supervised learning paradigm to semantically infer about places},
  type = {book},
  year = {2013},
  source = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  keywords = {HTM,place recognition,robot navigation,saliency map,semantics},
  volume = {8073 LNAI},
  id = {adf063f7-2149-3b87-8402-18ce5a1e1b1d},
  created = {2024-03-30T13:53:01.981Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:01.981Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  abstract = {In this paper a visual place recognition algorithm suitable for semantic inference is presented. It combines place and object classification attributes suitable for the recognition of congested and cluttered scenes. The place learning task is undertaken by a method capable of abstracting appearance information from the places to be memorized. The detected visual features are treated as a bag of words and quantized by a clustering algorithm to form a visual vocabulary of the explored places. Each query image is represented by a consistency histogram spread over the memorized vocabulary. Simultaneously, an object recognition approach based on a Hierarchical Temporal Memory network updates the robot's belief of its current position, exploiting the features of scattered objects within the scene. The input images introduced to the network undergo a saliency computation step and are subsequently thresholded based on an entropy metric for detecting multiple objects. The place and object decisions are fused by voting to infer the semantic attributes of a particular place. The efficiency of the proposed framework has been experimentally evaluated on a real dataset and proved capable of accurately recognizing multiple dissimilar places. © 2013 Springer-Verlag.},
  bibtype = {book},
  author = {Kostavelis, I. and Amanatiadis, A. and Gasteratos, A.},
  doi = {10.1007/978-3-642-40846-5_33}
}

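The place-learning side of the abstract follows a standard bag-of-visual-words pipeline, sketched minimally below. The descriptor dimensionality, vocabulary size and random stand-in features are assumptions; the HTM object-recognition branch and the final voting fusion are not shown.

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
train_descriptors = rng.random((500, 64))      # stand-in for local image features
vocab = KMeans(n_clusters=20, n_init=10, random_state=0).fit(train_descriptors)

def place_histogram(descriptors, vocab):
    # Quantize descriptors to visual words; return the normalized histogram.
    words = vocab.predict(descriptors)
    hist = np.bincount(words, minlength=vocab.n_clusters).astype(float)
    return hist / hist.sum()

query = place_histogram(rng.random((80, 64)), vocab)
# The memorized place whose histogram is closest to 'query' (e.g., smallest
# L1 distance) would be reported as the recognized place.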
A multi-objective exploration strategy for mobile robots under operational constraints. Amanatiadis, A., Chatzichristofis, S., Charalampous, K., Doitsidis, L., Kosmatopoulos, E., Tsalides, P., Gasteratos, A., & Roumeliotis, S. IEEE Access, 1, 2013.
@article{
  title = {A multi-objective exploration strategy for mobile robots under operational constraints},
  type = {article},
  year = {2013},
  keywords = {Autonomous agents,Cognitive robotics,Optimization methods,Path planning},
  volume = {1},
  id = {b6cdcd11-1b88-3263-b9f7-8d44ee358b09},
  created = {2024-03-30T13:53:01.991Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:01.991Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  abstract = {Multi-objective robot exploration constitutes one of the most challenging tasks for autonomous robots performing in various operations and different environments. However, the optimal exploration path depends heavily on the objectives and constraints that both these operations and environments introduce. Typical environment constraints include partially known or completely unknown workspaces, limited-bandwidth communications, and sparse or dense cluttered spaces. In such environments, the exploration robots must satisfy additional operational constraints, including time-critical goals, kinematic modeling, and resource limitations. Finding the optimal exploration path under these multiple constraints and objectives constitutes a challenging non-convex optimization problem. In our approach, we model the environment constraints in cost functions and utilize the cognitive-based adaptive optimization algorithm to meet time-critical objectives. The exploration path produced is optimal in the sense of globally minimizing the required time as well as maximizing the explored area of a partially unknown workspace. Since obstacles are sensed during operation, initial paths may become blocked, leading to robot entrapment. A supervisor is triggered to signal a blocked passage and subsequently escape from the basin of a cost-function local minimum. Extensive simulations and comparisons in typical scenarios are presented to show the efficiency of the proposed approach. © 2013 IEEE.},
  bibtype = {article},
  author = {Amanatiadis, A.A. and Chatzichristofis, S.A. and Charalampous, K. and Doitsidis, L. and Kosmatopoulos, E.B. and Tsalides, P. and Gasteratos, A. and Roumeliotis, S.I.},
  doi = {10.1109/ACCESS.2013.2283031},
  journal = {IEEE Access}
}

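To make the idea of folding operational constraints into cost functions concrete, here is an illustrative cost for a candidate exploration path: it rewards newly explored cells and penalizes traversal time. The weights, robot speed and grid representation are assumptions for demonstration, not the cost functions derived in the paper.

import numpy as np

def exploration_cost(path, visited, speed=0.5, w_area=1.0, w_time=0.2):
    # 'visited' is a boolean occupancy grid; 'path' a list of (row, col) cells.
    new_cells = sum(1 for c in set(path) if not visited[c])     # coverage gain
    length = sum(float(np.hypot(*np.subtract(a, b)))
                 for a, b in zip(path[:-1], path[1:]))
    return -w_area * new_cells + w_time * (length / speed)      # lower is better

visited = np.zeros((10, 10), dtype=bool)
print(exploration_cost([(0, 0), (0, 1), (1, 1)], visited))

A non-convex optimizer such as the cognitive-based adaptive optimization mentioned in the abstract would then search path space for the minimizer, with the supervisor escaping local minima when a passage turns out to be blocked.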
2012 (5)
Pose manifolds for efficient visual servoing. Kouskouridas, R., Amanatiadis, A., & Gasteratos, A. In IST 2012 - 2012 IEEE International Conference on Imaging Systems and Techniques, Proceedings, 2012.
@inproceedings{
  title = {Pose manifolds for efficient visual servoing},
  type = {inproceedings},
  year = {2012},
  id = {8876c2f4-fa54-385c-9ee4-d41c3e4daaee},
  created = {2024-03-30T13:53:02.050Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:02.050Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  abstract = {In order to adequately accomplish vision-based manipulation tasks, robotic platforms require an accurate estimation of the 3D pose of the target, which is efficiently approached by imaging techniques excessively utilizing large databases that consist of images of several objects captured under varying viewpoints. However, such approaches are characterized by large computational burden and complexity accompanied by limited capacities to interpolate between two known instances of an object. To address these issues we propose a robust 3D object pose estimation technique that entails a manifold modeling procedure based on appearance, geometrical and shape attributes of objects. We utilize a bunch-based method that is followed by a shape descriptor module, in order to establish low dimensional pose manifolds capable of distinguishing similar poses of different objects into the corresponding classes. Finally, an accurate estimation of the 3D pose of a target is provided by a neural network-based solution that encompasses a novel input-output space targeting method. We have comparatively studied the performance of our method against other related works, whilst experimental results justify our theoretical claims and provide evidence of low generalization error. © 2012 IEEE.},
  bibtype = {inproceedings},
  author = {Kouskouridas, R. and Amanatiadis, A. and Gasteratos, A.},
  doi = {10.1109/IST.2012.6295582},
  booktitle = {IST 2012 - 2012 IEEE International Conference on Imaging Systems and Techniques, Proceedings}
}

Sparse deep-learning algorithm for recognition and categorisation. Charalampous, K., Kostavelis, I., Amanatiadis, A., & Gasteratos, A. Electronics Letters, 48(20), 2012.
@article{
  title = {Sparse deep-learning algorithm for recognition and categorisation},
  type = {article},
  year = {2012},
  volume = {48},
  id = {eb2daf0b-b83b-3541-ae73-6c3cea0b3e32},
  created = {2024-03-30T13:53:02.054Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:02.054Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  abstract = {Presented is a deep-learning method for pattern classification and object recognition. The proposed methodology is based on an optimised version of the hierarchical temporal memory (HTM) algorithm and it preserves its basic structure, along with a tree structure of connected nodes. The tree structured scheme is inspired by the human neocortex, which provides great capabilities for recognition and categorisation. The proposed method is enriched with more representative quantisation centres using an adaptive neural gas algorithm, and a more accurate and dense grouping by applying a graph clustering technique. Sparse representation using L1-norm minimisation is embedded as a liaison between the quantisation centres and their grouping, reinforcing the proposed technique with advantages, such as a natural discrimination capability. The proposed work is experimentally compared with the aforementioned techniques as well as with state-of-the-art algorithms, presenting a better classification performance. © 2012 The Institution of Engineering and Technology.},
  bibtype = {article},
  author = {Charalampous, K. and Kostavelis, I. and Amanatiadis, A. and Gasteratos, A.},
  doi = {10.1049/el.2012.1033},
  journal = {Electronics Letters},
  number = {20}
}

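The L1-norm minimisation that links the quantisation centres to their grouping can be computed, for example, with the iterative shrinkage-thresholding algorithm (ISTA). The sketch below is a generic illustration; the dictionary, regularisation weight and iteration count are assumptions, not the paper's exact solver.

import numpy as np

def ista(D, x, lam=0.1, steps=200):
    # Solve min_a 0.5*||x - D a||^2 + lam*||a||_1 by iterative soft-thresholding.
    L = np.linalg.norm(D, 2) ** 2            # Lipschitz constant of the gradient
    a = np.zeros(D.shape[1])
    for _ in range(steps):
        z = a - D.T @ (D @ a - x) / L        # gradient step
        a = np.sign(z) * np.maximum(np.abs(z) - lam / L, 0.0)  # soft threshold
    return a

rng = np.random.default_rng(1)
D = rng.standard_normal((16, 32))            # 32 centres in a 16-D feature space
x = 2.0 * D[:, 3] + 0.01 * rng.standard_normal(16)
code = ista(D, x)
# 'code' is sparse; its largest entries point to the centres that explain x,
# which is the natural discrimination property the abstract refers to.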
Binary image 2D shape learning and recognition based on lattice-computing (LC) techniques. Kaburlasos, V., Papadakis, S., & Amanatiadis, A. Journal of Mathematical Imaging and Vision, 42(2-3), 2012.
@article{
  title = {Binary image 2D shape learning and recognition based on lattice-computing (LC) techniques},
  type = {article},
  year = {2012},
  keywords = {2D shape representation,Fuzzy lattice reasoning,Granular data,Inclusion measure,Intervals' number,Lattice computing,Learning,Pattern recognition,Sigmoid function},
  volume = {42},
  id = {bfef5643-6b3c-3752-b897-b730c8b50f5b},
  created = {2024-03-30T13:53:02.110Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:02.110Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  abstract = {This work introduces a Type-II fuzzy lattice reasoning (FLRtypeII) scheme for learning/generalizing novel 2D shape representations. A 2D shape is represented as an element, induced from populations of three different shape descriptors, in the product lattice (F3,≤), where (F,≤) denotes the lattice of Type-I intervals' numbers (INs). Learning is carried out by inducing Type-II INs, i.e. intervals in (F,≤). Our proposed techniques compare well with alternative classification methods from the literature in three benchmark classification problems. Competitive advantages include an accommodation of granular data as well as a visual representation of a class. We discuss extensions to gray/color images, etc. © Springer Science+Business Media, LLC 2011.},
  bibtype = {article},
  author = {Kaburlasos, V.G. and Papadakis, S.E. and Amanatiadis, A.},
  doi = {10.1007/s10851-011-0301-3},
  journal = {Journal of Mathematical Imaging and Vision},
  number = {2-3}
}

Imaging systems and techniques 2011. Giakos, G., Abdullah, M., Yang, W., Petrou, M., Nikita, K., Pastorino, M., Zervakis, M., Amanatiadis, A., Karras, D., Ceccarelli, M., Iakovidis, D., Zentai, G., Svelto, C., & Gasteratos, A. Measurement Science and Technology, 23(11), 2012.
@article{
  title = {Imaging systems and techniques 2011},
  type = {article},
  year = {2012},
  volume = {23},
  id = {68dedf91-ea1c-3ec7-bb62-05d64076aabf},
  created = {2024-03-30T13:53:02.128Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:02.128Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  bibtype = {article},
  author = {Giakos, G. and Abdullah, M.Z. and Yang, W. and Petrou, M. and Nikita, K. and Pastorino, M. and Zervakis, M. and Amanatiadis, A. and Karras, D.A. and Ceccarelli, M. and Iakovidis, D. and Zentai, G. and Svelto, C. and Gasteratos, A.},
  doi = {10.1088/0957-0233/23/11/110101},
  journal = {Measurement Science and Technology},
  number = {11}
}

Efficient robot path planning in the presence of dynamically expanding obstacles. Charalampous, K., Amanatiadis, A., & Gasteratos, A. Volume 7495 LNCS, 2012.
@book{
  title = {Efficient robot path planning in the presence of dynamically expanding obstacles},
  type = {book},
  year = {2012},
  source = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  keywords = {cellular automata,obstacle avoidance,robot path planning},
  volume = {7495 LNCS},
  id = {4656bb78-b816-3d65-a120-53230e5634e0},
  created = {2024-03-30T13:53:02.177Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:02.177Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  abstract = {This paper presents a framework for robot path planning based on the A* search algorithm in the presence of dynamically expanding obstacles. The overall method follows Cellular Automata (CA) based rules, exploiting the discrete nature of CAs for both obstacle and robot state spaces. For the search strategy, the discrete properties of the A* algorithm were utilized, allowing a seamless merging of both CA and A* theories. The proposed algorithm guarantees both a collision free and a cost efficient path to target with optimal computational cost. More particularly, it expands the map state space with respect to time using adaptive time intervals in order to predict the necessary future expansion of obstacles, assuring both a safe and a minimum-cost path. The proposed method can be considered a general framework in the sense that it can be applied to any arbitrarily shaped obstacle. © 2012 Springer-Verlag Berlin Heidelberg.},
  bibtype = {book},
  author = {Charalampous, K. and Amanatiadis, A. and Gasteratos, A.},
  doi = {10.1007/978-3-642-33350-7_34}
}

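A minimal way to reproduce the core mechanism is to run A* over (cell, time) states, rejecting a cell if the predicted obstacle already occupies it at the arrival time. The Manhattan growth model, unit step cost and small grid below are assumptions for illustration; the paper's CA rules and adaptive time intervals are not reproduced here.

import heapq

def blocked(cell, t, seeds, rate=0.2):
    # Toy CA-style growth: a seed occupies cells within Manhattan radius rate*t.
    return any(abs(cell[0] - sx) + abs(cell[1] - sy) <= rate * t
               for sx, sy in seeds)

def astar(start, goal, size, seeds):
    h = lambda c: abs(c[0] - goal[0]) + abs(c[1] - goal[1])  # admissible heuristic
    frontier = [(h(start), 0, start, [start])]               # (f, time, cell, path)
    seen = set()
    while frontier:
        _, t, cell, path = heapq.heappop(frontier)
        if cell == goal:
            return path
        if (cell, t) in seen:
            continue
        seen.add((cell, t))
        x, y = cell
        for n in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            if (0 <= n[0] < size and 0 <= n[1] < size
                    and not blocked(n, t + 1, seeds)):
                heapq.heappush(frontier, (t + 1 + h(n), t + 1, n, path + [n]))
    return None                                              # no safe path exists

print(astar((0, 0), (7, 7), 8, seeds=[(4, 4)]))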
2011 (5)
Guiding a robotic gripper by visual feedback for object manipulation tasks. Kouskouridas, R., Amanatiadis, A., & Gasteratos, A. In 2011 IEEE International Conference on Mechatronics, ICM 2011 - Proceedings, 2011.
@inproceedings{
  title = {Guiding a robotic gripper by visual feedback for object manipulation tasks},
  type = {inproceedings},
  year = {2011},
  id = {2b3b6921-feec-3933-baeb-c9aff2c18529},
  created = {2024-03-30T13:53:02.193Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:02.193Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  abstract = {This paper presents a novel object manipulation technique that could be adopted by any advanced mechatronic platform in order to perform demanding pick and place tasks. The ultimate goal of a robotics researcher is to provide an applicable manipulation solution that minimizes the user's involvement. It has been shown that the best solution to this problem is provided by the introduction of sensors that allow an automatic or, at least, semi-automatic grasping of the targets. The proposed method relies on a vision-based framework that is responsible for several vital tasks that directly affect the manipulation process. The contribution of the paper incorporates a shape retrieval technique accompanied by classification and clustering algorithms that are utilized during the objects' pose estimation process. The experimental results obtained confirm the validity of the presented approach. © 2011 IEEE.},
  bibtype = {inproceedings},
  author = {Kouskouridas, R. and Amanatiadis, A. and Gasteratos, A.},
  doi = {10.1109/ICMECH.2011.5971325},
  booktitle = {2011 IEEE International Conference on Mechatronics, ICM 2011 - Proceedings}
}

Efficient hierarchical matching algorithm for processing uncalibrated stereo vision images and its hardware architecture. Nalpantidis, L., Amanatiadis, A., Sirakoulis, G., & Gasteratos, A. IET Image Processing, 5(5), 2011.
@article{
  title = {Efficient hierarchical matching algorithm for processing uncalibrated stereo vision images and its hardware architecture},
  type = {article},
  year = {2011},
  volume = {5},
  id = {938cc976-91fc-397f-82ab-52ace5fa18fa},
  created = {2024-03-30T13:53:02.237Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:02.237Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  abstract = {In motion estimation, the sub-pixel matching technique involves the search of sub-sample positions as well as integer-sample positions between the image pairs, choosing the one that gives the best match. Based on this idea, this work proposes an estimation algorithm, which performs a 2-D correspondence search using a hierarchical search pattern. The intermediate results are refined by 3-D cellular automata (CA). The disparity value is then defined using the distance of the matching position. Therefore the proposed algorithm can process uncalibrated and non-rectified stereo image pairs, maintaining the computational load within reasonable levels. Additionally, a hardware architecture of the algorithm is deployed. Its performance has been evaluated on both synthetic and real self-captured image sets. Its attributes make the proposed method suitable for autonomous outdoor robotic applications. © 2011 The Institution of Engineering and Technology.},
  bibtype = {article},
  author = {Nalpantidis, L. and Amanatiadis, A. and Sirakoulis, G.C. and Gasteratos, A.},
  doi = {10.1049/iet-ipr.2009.0262},
  journal = {IET Image Processing},
  number = {5}
}

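The hierarchical search pattern can be sketched as a coarse pass on a sparse displacement grid followed by a dense refinement around the winner; disparity is then the length of the 2-D displacement, which is why no rectification is needed. Window sizes, the SAD cost and the synthetic images are assumptions; the 3-D cellular-automata refinement stage is omitted.

import numpy as np

def sad(a, b):
    return np.abs(a - b).sum()

def best_shift(left, right, y, x, centre, radius, step, size=5):
    # Exhaustive SAD search over a (2*radius+1)^2 displacement window.
    r = size // 2
    patch = left[y - r:y + r + 1, x - r:x + r + 1]
    best, arg = np.inf, centre
    for dy in range(centre[0] - radius, centre[0] + radius + 1, step):
        for dx in range(centre[1] - radius, centre[1] + radius + 1, step):
            yy, xx = y + dy, x + dx
            if r <= yy < right.shape[0] - r and r <= xx < right.shape[1] - r:
                cost = sad(patch, right[yy - r:yy + r + 1, xx - r:xx + r + 1])
                if cost < best:
                    best, arg = cost, (dy, dx)
    return arg

def hierarchical_disparity(left, right, y, x):
    coarse = best_shift(left, right, y, x, (0, 0), radius=12, step=4)
    dy, dx = best_shift(left, right, y, x, coarse, radius=3, step=1)
    return float(np.hypot(dy, dx))        # disparity as displacement magnitude

yy, xx = np.mgrid[0:64, 0:64]
left = np.sin(xx / 3.0) + np.cos(yy / 4.0)    # smooth synthetic scene
right = np.roll(left, 5, axis=1)              # pure 5-pixel horizontal shift
print(hierarchical_disparity(left, right, 32, 32))   # ~5.0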
Evaluation of shape descriptors for shape-based image retrieval. Amanatiadis, A., Kaburlasos, V., Gasteratos, A., & Papadakis, S. IET Image Processing, 5(5), 2011.
@article{
  title = {Evaluation of shape descriptors for shape-based image retrieval},
  type = {article},
  year = {2011},
  volume = {5},
  id = {e8db983c-c64e-3dca-9473-5e7ab1989033},
  created = {2024-03-30T13:53:02.247Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:02.247Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  abstract = {This article presents a comparative study between scale, rotation and translation invariant descriptors for shape representation and retrieval. Since shape is one of the most widely used image features exploited in content-based image retrieval systems, the authors studied, for each descriptor, the number of coefficients needed for indexing and their retrieval performance. Specifically, the authors studied Fourier, curvature scale space, angular radial transform (ART) and image moment descriptors for shape representation. The four shape descriptors are evaluated against each other using the standard methodology and the two most appropriate and available databases. The results showed that moment descriptors present the best performance in terms of shape representation quality while ART presents the lowest descriptor size. © 2011 The Institution of Engineering and Technology.},
  bibtype = {article},
  author = {Amanatiadis, A. and Kaburlasos, V.G. and Gasteratos, A. and Papadakis, S.E.},
  doi = {10.1049/iet-ipr.2009.0246},
  journal = {IET Image Processing},
  number = {5}
}

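As a concrete instance of the invariances under comparison, the following sketch computes a Fourier descriptor from the boundary's centroid-distance signature: subtracting the centroid gives translation invariance, taking coefficient magnitudes removes rotation and starting-point phase, and dividing by the first magnitude gives scale invariance. The boundary sampling and the number of coefficients kept are assumptions for illustration.

import numpy as np

def fourier_descriptor(boundary, n_coeffs=10):
    # boundary: (N, 2) array of ordered contour points.
    pts = boundary - boundary.mean(axis=0)        # translation invariance
    dist = np.hypot(pts[:, 0], pts[:, 1])         # centroid-distance signal
    mag = np.abs(np.fft.fft(dist))                # magnitudes drop the phase
    return mag[1:n_coeffs + 1] / mag[0]           # scale invariance

theta = np.linspace(0.0, 2.0 * np.pi, 128, endpoint=False)
ellipse = np.c_[3.0 * np.cos(theta), np.sin(theta)]
print(fourier_descriptor(ellipse))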
An intelligent multi-sensor system for first responder indoor navigation. Amanatiadis, A., Gasteratos, A., & Koulouriotis, D. Measurement Science and Technology, 22(11), 2011.
@article{
  title = {An intelligent multi-sensor system for first responder indoor navigation},
  type = {article},
  year = {2011},
  keywords = {first responder,indoor navigation,pedestrian localization,sensor fusion},
  volume = {22},
  id = {abf9cce4-7455-3665-b1cb-1bf30c84b743},
  created = {2024-03-30T13:53:02.294Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:02.294Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  abstract = {This paper presents an indoor navigation system based on sensor data from first responder wearable modules. The system combines an inertial measurement unit, a digital camera and a radio frequency identification device in a way that allows the advantages of each sensor to be fully exploited. The key to this synergy is the extracted qualitative criteria which characterize the performance of each sensor subsystem at various first responder activities and operational conditions under certain time intervals. The accuracy of the detected walking pattern through measurements of the acceleration magnitude from the inertial sensor is utilized for the performance evaluation of the dead-reckoning algorithm. The amount of correct feature matches is linked to the three-dimensional scene representation from the camera navigation subsystem and finally, the degree of probability of each radio frequency identification location estimate is exploited as a straightforward qualitative criterion. The final fused location estimation is extracted after applying fuzzy if-then rules at each time interval. Since the inertial sensor suffers from accumulated drift, the rules of the fuzzy inference system drop the measurements from the inertial measurement unit whenever the other two subsystems perform adequately. Extensive comparison and experimental results based on the proposed architecture have shown not only better navigation effectiveness and lower positioning error compared with other first responder navigation systems but also increased accuracy in various and challenging operational conditions. © 2011 IOP Publishing Ltd.},
  bibtype = {article},
  author = {Amanatiadis, A. and Gasteratos, A. and Koulouriotis, D.},
  doi = {10.1088/0957-0233/22/11/114025},
  journal = {Measurement Science and Technology},
  number = {11}
}

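The decision logic described above can be approximated by a quality-weighted combination in which the IMU estimate is dropped whenever the other two subsystems score highly, mirroring the drift-suppression rule in the abstract. The sketch below is an illustration only: the 0.7 threshold, the quality scores and the weighted average stand in for the full fuzzy inference system, whose membership functions are not reproduced.

import numpy as np

def fuse(estimates, threshold=0.7):
    # estimates: name -> ((x, y) position, quality score in [0, 1]).
    weights = {name: q for name, (_, q) in estimates.items()}
    if weights.get("camera", 0.0) > threshold and weights.get("rfid", 0.0) > threshold:
        weights["imu"] = 0.0             # suppress drift-prone dead reckoning
    total = sum(weights.values())
    return sum(np.asarray(p, dtype=float) * (weights[name] / total)
               for name, (p, _) in estimates.items())

print(fuse({"imu": ((1.0, 2.2), 0.4),
            "camera": ((1.1, 2.0), 0.9),
            "rfid": ((0.9, 2.1), 0.8)}))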
Imaging systems and techniques. Giakos, G., Yang, W., Petrou, M., Nikita, K., Pastorino, M., Amanatiadis, A., & Zentai, G. Measurement Science and Technology, 22(11), 2011.
@article{
  title = {Imaging systems and techniques},
  type = {article},
  year = {2011},
  volume = {22},
  id = {a68cab09-c2a0-32c9-b405-e15b8528ea56},
  created = {2024-03-30T13:53:02.312Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:02.312Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  bibtype = {article},
  author = {Giakos, G. and Yang, W. and Petrou, M. and Nikita, K.S. and Pastorino, M. and Amanatiadis, A. and Zentai, G.},
  doi = {10.1088/0957-0233/22/11/110101},
  journal = {Measurement Science and Technology},
  number = {11}
}

2010 (3)
A fuzzy multi-sensor architecture for indoor navigation. Amanatiadis, A., Chrysostomou, D., Koulouriotis, D., & Gasteratos, A. In 2010 IEEE International Conference on Imaging Systems and Techniques, IST 2010 - Proceedings, 2010.
@inproceedings{
  title = {A fuzzy multi-sensor architecture for indoor navigation},
  type = {inproceedings},
  year = {2010},
  keywords = {First responder navigation system,Indoor navigation,Multi-sensor fusion,Pedestrian localization},
  id = {8a5a6d5a-3fd3-3e32-93d7-4ed46dd77681},
  created = {2024-03-30T13:53:02.373Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:02.373Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  abstract = {This paper presents an indoor navigation system based on sensor data from first responder wearable modules. The proposed system integrates data from an inertial sensor, a digital camera and a radio frequency identification device using a sophisticated fuzzy algorithm. To improve the navigation accuracy, different types of first responder activities and operational conditions were examined and classified according to extracted qualitative attributes. The vertical acceleration data, which indicates the periodic vibration during the gait cycle, is used to evaluate the accuracy of the inertial based navigation subsystem. The amount of strong feature correspondences assesses the quality of the three-dimensional scene knowledge from digital camera feedback. Finally, the qualitative attribute used to evaluate the efficiency of the radio frequency identification subsystem is the degree of probability of each location estimate. Fuzzy if-then rules are then applied to these three attributes in order to carry out the fusion task. Simulation results based on the proposed architecture have shown better navigation effectiveness and lower positioning error compared with the stand-alone navigation systems used. © 2010 IEEE.},
  bibtype = {inproceedings},
  author = {Amanatiadis, A. and Chrysostomou, D. and Koulouriotis, D. and Gasteratos, A.},
  doi = {10.1109/IST.2010.5548497},
  booktitle = {2010 IEEE International Conference on Imaging Systems and Techniques, IST 2010 - Proceedings}
}

2-D shape representation and recognition by lattice computing techniques. Kaburlasos, V., Amanatiadis, A., & Papadakis, S. Volume 6077 LNAI, 2010.
@book{
  title = {2-D shape representation and recognition by lattice computing techniques},
  type = {book},
  year = {2010},
  source = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  keywords = {2-D shape classification,Fuzzy lattice reasoning (FLR),Inclusion measure,Intervals' number (IN),Lattice computing},
  volume = {6077 LNAI},
  issue = {PART 2},
  id = {e299186b-eed0-3902-948d-aaf50d053771},
  created = {2024-03-30T13:53:02.390Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:02.390Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  abstract = {We consider binary images such that an image includes a single 2-D shape, from which we extract three populations of three different (shape) descriptors, respectively. Each population is represented by an Intervals' Number, or IN for short, in the mathematical lattice (F,≤) of INs. In conclusion, a 2-D shape is represented in the Cartesian product lattice (F3,≤). We present a 2-D shape classification scheme based on fuzzy lattice reasoning (FLR). Preliminary experimental results have been encouraging. We discuss the potential of Lattice Computing (LC) techniques in image representation and recognition applications. © 2010 Springer-Verlag.},
  bibtype = {book},
  author = {Kaburlasos, V.G. and Amanatiadis, A. and Papadakis, S.E.},
  doi = {10.1007/978-3-642-13803-4_49}
}

Digital image stabilization by independent component analysis. Amanatiadis, A., & Andreadis, I. IEEE Transactions on Instrumentation and Measurement, 59(7), 2010.
@article{
  title = {Digital image stabilization by independent component analysis},
  type = {article},
  year = {2010},
  keywords = {Digital image stabilization (DIS),Ego-motion determination,Image sequence analysis,Independent component analysis (ICA),Motion estimation},
  volume = {59},
  id = {33fbb3c6-1f4a-34c5-a981-06657b03e54c},
  created = {2024-03-30T13:53:02.438Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:02.438Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  abstract = {In this paper, we propose a novel digital-image-stabilization scheme based on independent component analysis (ICA). The method utilizes ICA and information obtained from the image sequence to deconvolve the ego-motion from the unwanted motion of the sequence. We notice that the motion observed in image sequences captured from consumer electronics such as handheld cameras and third-generation mobile phones is mainly caused by two independent motions: the camera motion (ego-motion) and the undesired hand jitter (high-frequency motion). The extensive and successful application of ICA in both the statistical and the signal processing community has helped us to realize that the independence property of these two primary signals facilitates the application of ICA for deconvolution by maximizing their statistical independence. Sets of estimated local motion vectors of the sequence are introduced to the ICA system for separation. Subsequently, we process the unmixed motion vectors to classify the signals into ego-motion and high-frequency motion. Then, when the permutation ambiguity is resolved, the appropriate sign and energy are assigned to the ego-motion vector, resulting in the stabilized image sequence. Experimental results have shown that, apart from the successful deconvolution of the two different motions, the proposed scheme exhibits superior performance compared to other digital-image-stabilization algorithms. © 2006 IEEE.},
  bibtype = {article},
  author = {Amanatiadis, A.A. and Andreadis, I.},
  doi = {10.1109/TIM.2009.2028216},
  journal = {IEEE Transactions on Instrumentation and Measurement},
  number = {7}
}

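The separation step can be demonstrated with FastICA on synthetic motion-vector tracks: two observed sequences are mixtures of a smooth intentional pan and high-frequency jitter, and ICA recovers the two sources up to sign and scale (the permutation ambiguity the abstract mentions). The synthetic signals and mixing weights below are assumptions standing in for the paper's estimated local motion vectors.

import numpy as np
from sklearn.decomposition import FastICA

t = np.linspace(0.0, 10.0, 500)
ego = 0.5 * t                                        # smooth intentional pan
jitter = 0.8 * np.sin(40.0 * t)                      # high-frequency hand shake
X = np.c_[ego + 0.6 * jitter, 0.8 * ego + jitter]    # two observed mixtures

sources = FastICA(n_components=2, random_state=0).fit_transform(X)
smoothness = np.abs(np.diff(sources, axis=0)).mean(axis=0)
ego_idx = int(np.argmin(smoothness))                 # smoother source = ego-motion
# Compensating each frame by the remaining (jitter) component would yield the
# stabilized sequence.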
2009 (4)
Dense disparity estimation using a hierarchical matching technique from uncalibrated stereo vision. Nalpantidis, L., Amanatiadis, A., Sirakoulis, G., Kyriakoulis, N., & Gasteratos, A. In 2009 IEEE International Workshop on Imaging Systems and Techniques, IST 2009 - Proceedings, 2009.
@inproceedings{
  title = {Dense disparity estimation using a hierarchical matching technique from uncalibrated stereo vision},
  type = {inproceedings},
  year = {2009},
  keywords = {Disparity estimation,Hierarchical matching,Uncalibrated stereo vision},
  id = {edafd95b-feb3-377a-bb90-d2235a0f972b},
  created = {2024-03-30T13:53:02.447Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:02.447Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  abstract = {In motion estimation, the sub-pixel matching technique involves the search of sub-sample positions as well as integer-sample positions between the image pairs, choosing the one that gives the best match. Based on this idea, the proposed disparity estimation algorithm performs a 2-D correspondence search using a hierarchical search pattern. The disparity value is then defined using the distance of the matching position. Therefore, the proposed algorithm can process non-rectified stereo image pairs, maintaining the computational load within reasonable levels. © 2009 IEEE.},
  bibtype = {inproceedings},
  author = {Nalpantidis, L. and Amanatiadis, A. and Sirakoulis, G. and Kyriakoulis, N. and Gasteratos, A.},
  doi = {10.1109/IST.2009.5071680},
  booktitle = {2009 IEEE International Workshop on Imaging Systems and Techniques, IST 2009 - Proceedings}
}

A comparative study of invariant descriptors for shape retrieval. Amanatiadis, A., Kaburlasos, V., Gasteratos, A., & Papadakis, S. In 2009 IEEE International Workshop on Imaging Systems and Techniques, IST 2009 - Proceedings, 2009.
@inproceedings{
  title = {A comparative study of invariant descriptors for shape retrieval},
  type = {inproceedings},
  year = {2009},
  keywords = {Image retrieval system,Invariant descriptors,Shape matching,Shape representation},
  id = {618722f8-6bab-3524-bed4-a5516063d461},
  created = {2024-03-30T13:53:02.497Z},
  file_attached = {false},
  profile_id = {4bc011ba-7716-36d2-b7a8-61c7a7600de9},
  last_modified = {2024-03-30T13:53:02.497Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {false},
  hidden = {false},
  private_publication = {true},
  abstract = {This paper presents a comparative study between scale, rotation and translation invariant descriptors for shape representation and retrieval. Specifically, we studied Fourier, angular radial transform and image moment descriptors for shape representation. Since shape is one of the most widely used image features exploited in content-based image retrieval systems, we studied, for each descriptor, the number of coefficients needed for indexing and their retrieval performance. Results showed that moment descriptors present the best performance both in terms of shape representation quality and in the amount of required coefficients. © 2009 IEEE.},
  bibtype = {inproceedings},
  author = {Amanatiadis, A. and Kaburlasos, V.G. and Gasteratos, A. and Papadakis, S.E.},
  doi = {10.1109/IST.2009.5071672},
  booktitle = {2009 IEEE International Workshop on Imaging Systems and Techniques, IST 2009 - Proceedings}
}

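
Of the descriptor families compared in this entry, the Fourier descriptors are the easiest to illustrate. The sketch below (plain Python/NumPy, not the paper's code) builds scale-, rotation- and translation-invariant descriptors from a closed contour: dropping the DC term removes translation, dividing by |F(1)| removes scale, and keeping only magnitudes discards rotation and the starting point.

import numpy as np

def fourier_descriptors(contour, k=16):
    # contour: (N, 2) array of boundary points; k: number of coefficients kept.
    z = contour[:, 0] + 1j * contour[:, 1]   # boundary as a complex signal
    f = np.fft.fft(z)
    # f[0] (translation) is dropped; magnitudes are normalised by |f[1]|
    # (scale); taking abs() discards phase (rotation / start point).
    return np.abs(f[1:k + 1]) / (np.abs(f[1]) + 1e-12)
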

Pose estimation of a volant platform with a monocular visuo-inertial system.
Kyriakoulis, N., Karakasis, E., Gasteratos, A., & Amanatiadis, A.
In 2009 IEEE International Workshop on Imaging Systems and Techniques, IST 2009 - Proceedings, 2009.

@inproceedings{
  title = {Pose estimation of a volant platform with a monocular visuo-inertial system},
  type = {inproceedings},
  year = {2009},
  keywords = {Inertial pose estimation,Visual pose estimation,Visuo-inertial fusion},
  abstract = {One of the serious problems in robotics applications is the estimation of the robot's pose. Much research effort has been put into finding the pose via inertial and proximity sensors. In the last decades, however, many systems have adopted vision to estimate the pose, using homographies and projective geometry. In this paper, the pose estimation is achieved by the identification of a geometrically known platform from one camera and from the measurements of an inertial unit. The extended Kalman filter (EKF) is used for data fusion and error compensation. The novelty of this system is that the visual sensor and the inertial unit are mounted on different mobile systems. The proposed pose estimation system exhibits high accuracy in real time. © 2009 IEEE.},
  author = {Kyriakoulis, N. and Karakasis, E. and Gasteratos, A. and Amanatiadis, A.},
  doi = {10.1109/IST.2009.5071679},
  booktitle = {2009 IEEE International Workshop on Imaging Systems and Techniques, IST 2009 - Proceedings}
}

A survey on evaluation methods for image interpolation.
Amanatiadis, A., & Andreadis, I.
Measurement Science and Technology, 20(10), 2009.

@article{
  title = {A survey on evaluation methods for image interpolation},
  type = {article},
  year = {2009},
  keywords = {Error measurement,Evaluation methods,Interpolation,Performance analysis},
  volume = {20},
  number = {10},
  abstract = {Image interpolation is applied to Euclidean, affine and projective transformations in numerous imaging applications. However, due to the unique characteristics and wide applications of image interpolation, a separate study of its evaluation methods is crucial. The paper studies the different existing methods for the evaluation of image interpolation techniques. Furthermore, an evaluation method utilizing ground truth images for the comparisons is proposed. Two main classes of analysis are proposed as the basis for the assessments: performance evaluation and cost evaluation. The presented methods are briefly described, followed by comparative discussions. This survey provides information for the appropriate use of the existing evaluation methods and their improvement, and assists in the design of new evaluation methods and techniques. © 2009 IOP Publishing Ltd.},
  author = {Amanatiadis, A. and Andreadis, I.},
  doi = {10.1088/0957-0233/20/10/104015},
  journal = {Measurement Science and Technology}
}
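
The ground-truth evaluation this survey proposes needs a fidelity measure, and PSNR is the standard one. A minimal sketch, assuming 8-bit images and a ground-truth reference:

import numpy as np

def psnr(reference, test, peak=255.0):
    # Peak signal-to-noise ratio of an interpolated image against its
    # ground-truth reference; higher is better, infinite for a perfect match.
    mse = np.mean((reference.astype(np.float64) - test.astype(np.float64)) ** 2)
    return float('inf') if mse == 0.0 else 10.0 * np.log10(peak ** 2 / mse)
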

2008 (6)

Performance evaluation techniques for image scaling algorithms.
Amanatiadis, A., & Andreadis, I.
In IST 2008 - IEEE Workshop on Imaging Systems and Techniques Proceedings, 2008.

@inproceedings{
  title = {Performance evaluation techniques for image scaling algorithms},
  type = {inproceedings},
  year = {2008},
  keywords = {Digital image scaling,Method characterization,Performance assessment,Zooming evaluation},
  abstract = {The paper studies the different existing methods for image scaling algorithm evaluation. In addition, a new evaluation method is proposed that is directly related to ground-truth optically scaled images. Digital image scaling is classified into the general interpolation scheme. However, due to the unique characteristics and wide applications of image scaling, a separate study of its evaluation methods is crucial. Two main classes of criteria are used as the basis for the assessments: the fidelity criteria and the computational complexity criteria. All methods are briefly described and comparative discussions are provided. This survey is helpful for the appropriate use of existing evaluation methods and for improving their performance, as well as for systematically designing new evaluation methods. © 2008 IEEE.},
  author = {Amanatiadis, A. and Andreadis, I.},
  doi = {10.1109/IST.2008.4659952},
  booktitle = {IST 2008 - IEEE Workshop on Imaging Systems and Techniques Proceedings}
}
Image sequence stabilization using fuzzy Kalman filtering and log-polar transformation.
Kyriakoulis, N., Gasteratos, A., & Amanatiadis, A.
In VISAPP 2008 - 3rd International Conference on Computer Vision Theory and Applications, Proceedings, volume 2, 2008.

@inproceedings{
  title = {Image sequence stabilization using fuzzy Kalman filtering and log-polar transformation},
  type = {inproceedings},
  year = {2008},
  keywords = {Fuzzy systems,Image stabilization,Kalman filter,Log-polar,Optical flow},
  volume = {2},
  abstract = {Digital image stabilization (DIS) is the process that compensates the undesired fluctuations of a frame's position in an image sequence by means of digital image processing techniques. DIS techniques usually comprise two successive units: the first estimates the motion and the second compensates it. In this paper, a novel digital image stabilization technique is proposed, featuring a fuzzy Kalman estimation of the global motion vector in the log-polar plane. The global motion vector is extracted using four local motion vectors computed on respective sub-images in the log-polar plane. The proposed technique exploits the advantages of both the fuzzy Kalman system and the log-polar plane. The compensation is based on the motion estimation in the log-polar domain, filtered by the fuzzy Kalman system. The described technique outperforms existing ones in terms of response time, output quality and level of compensation.},
  author = {Kyriakoulis, N. and Gasteratos, A. and Amanatiadis, A.},
  booktitle = {VISAPP 2008 - 3rd International Conference on Computer Vision Theory and Applications, Proceedings}
}
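
The step where four local motion vectors become one global motion vector can be sketched as a reliability-weighted average. In the paper the weighting comes from a fuzzy Kalman system operating in the log-polar plane; the plain weights here are a simplified stand-in for those reliabilities.

import numpy as np

def global_motion(local_vectors, reliabilities):
    # local_vectors: (4, 2) array, one motion vector per sub-image;
    # reliabilities: weights standing in for the fuzzy system's output.
    v = np.asarray(local_vectors, dtype=np.float64)
    w = np.asarray(reliabilities, dtype=np.float64)
    return (w[:, None] * v).sum(axis=0) / w.sum()
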
Real-time servo control architecture for parallel stabilization and manipulation of tele-operated robots.
Amanatiadis, A., & Papadakis, S.E.
In International Conference on Automation, Robotics and Control Systems 2008, ARCS 2008, 2008.

@inproceedings{
  title = {Real-time servo control architecture for parallel stabilization and manipulation of tele-operated robots},
  type = {inproceedings},
  year = {2008},
  keywords = {Automatic stabilization,Head-tracking control,Human-machine interaction,Real-time servo control architecture},
  abstract = {This paper presents a control architecture for semi-autonomous tele-operated robots. The architecture is based on two parallel behavior modules: automatic mechanical image stabilization control and remote head posing control. All processes are performed in Linux-based real-time operating systems using open source libraries under the GPL license. Behavior is executed through priorities to prevent wasted computation and simultaneous, non-synchronized accesses to the servo motors. Experimental results showed that our system is capable of satisfying the hard real-time requirements for the servo control with great precision. The system has an open, modular and scalable architecture with high performance and low cost.},
  author = {Amanatiadis, A. and Papadakis, S.E.},
  booktitle = {International Conference on Automation, Robotics and Control Systems 2008, ARCS 2008}
}
Development of a stereo vision system for remotely operated robots: A control and video streaming architecture.
Amanatiadis, A., Gasteratos, A., Georgoulas, C., Kotoulas, L., & Andreadis, I.
In VECIMS 2008 - IEEE Conference on Virtual Environments, Human-Computer Interfaces and Measurement Systems Proceedings, 2008.

@inproceedings{
  title = {Development of a stereo vision system for remotely operated robots: A control and video streaming architecture},
  type = {inproceedings},
  year = {2008},
  keywords = {Human-machine interaction,Open-source programming,Real-time remote control,Telerobotic vision},
  abstract = {This paper describes the open and flexible architecture of a stereo vision system prototype, using open source software. The system is designed for teleoperated robots and includes a four-degrees-of-freedom stereo head mechanism, a pair of high-performance digital cameras, a head tracker and a head-mounted display. All processes for the head control and video streaming are performed in Linux-based real-time operating systems using open source libraries under the GPL license. Experimental results showed that our system is capable of satisfying the hard real-time requirements for the head control, with great precision, and a low latency for the stereo video streaming. The video streaming management is particularly sophisticated, resulting in a flexible, efficient and reliable service. © 2008 IEEE.},
  author = {Amanatiadis, A. and Gasteratos, A. and Georgoulas, C. and Kotoulas, L. and Andreadis, I.},
  doi = {10.1109/VECIMS.2008.4592745},
  booktitle = {VECIMS 2008 - IEEE Conference on Virtual Environments, Human-Computer Interfaces and Measurement Systems Proceedings}
}
Design and implementation of a fuzzy area-based image-scaling technique.
Amanatiadis, A., Andreadis, I., & Konstantinidis, K.
IEEE Transactions on Instrumentation and Measurement, 57(8), 2008.

@article{
  title = {Design and implementation of a fuzzy area-based image-scaling technique},
  type = {article},
  year = {2008},
  keywords = {Digital image scaling,Fuzzy area-based interpolation,Fuzzy zooming controller,Real-time image scaling},
  volume = {57},
  number = {8},
  abstract = {In this paper, we propose the design and implementation of an interpolation scheme for performing image scaling by utilizing a dynamic mask combined with a sophisticated neighborhood averaging fuzzy algorithm. The functions that contribute to the final interpolated image are the areas of the input pixels, overlapped by a dynamic mask, and the difference in intensity between the input pixels. Fuzzy if-then rules for these two functions are presented to carry out the interpolation task. Simulation results have shown a fine high-frequency response and a low interpolation error, in comparison with other widely used algorithms. The interpolation can be applied to both gray-scale and color images for any scaling factor. The proposed hardware structure is implemented in a field-programmable gate array (FPGA) chip and is based on a sequence of pipeline stages and parallel processing to minimize computation times. The fuzzy image interpolation implementation combines a fuzzy inference system and an image-interpolation technique in one hardware system. Its main features are the ability to accurately approximate the Gaussian membership functions used by the fuzzy inference system with very few memory requirements and its high-frequency performance of 65 MHz, making it appropriate for real-time imaging applications. The system can magnify gray-scale images of up to 10-bit resolution. The maximum input image size is 1024 × 1024 pixels for a maximum of 800% magnification. © 2008 IEEE.},
  author = {Amanatiadis, A. and Andreadis, I. and Konstantinidis, K.},
  doi = {10.1109/TIM.2008.925723},
  journal = {IEEE Transactions on Instrumentation and Measurement}
}
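
The core of the area-based scheme is that each output pixel averages the (up to four) source pixels its mask overlaps, weighted by the overlapped areas. A minimal sketch follows; the paper's fuzzy rule base additionally modulates the weights by the intensity differences between pixels, which is omitted here.

import numpy as np

def area_interpolate(pixels, areas):
    # pixels: luminosities of the source pixels under the output-pixel mask;
    # areas: the mask area overlapping each of them. Plain area weighting,
    # without the fuzzy intensity-difference modulation of the paper.
    p = np.asarray(pixels, dtype=np.float64)
    a = np.asarray(areas, dtype=np.float64)
    return float((a * p).sum() / a.sum())
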
An integrated architecture for adaptive image stabilization in zooming operation.
Amanatiadis, A., & Andreadis, I.
IEEE Transactions on Consumer Electronics, 54(2), 2008.

@article{
  title = {An integrated architecture for adaptive image stabilization in zooming operation},
  type = {article},
  year = {2008},
  keywords = {Cameras,Computer architecture,Interpolation,Motion estimation,Optical imaging,Optical sensors,Pixel},
  volume = {54},
  number = {2},
  abstract = {This paper presents a novel architecture for integrating a digital stabilizer with the camera zooming process. The proposed architecture allows integration for both optical and digital zooming operation scenarios. The stabilizer adjusts its operational parameters from the current optical zooming status for a refined local motion estimation. Subsequently, the global motion compensation vectors, produced by the digital stabilizer, are sent to the digital zooming system, where the image compensation is merged within the interpolation process. Experimental results indicate that the proposed architecture can improve not only the quantitative performance of digital stabilization but also the computational efficiency when zooming is employed. © 2008 IEEE.},
  author = {Amanatiadis, A. and Andreadis, I.},
  doi = {10.1109/TCE.2008.4560136},
  journal = {IEEE Transactions on Consumer Electronics}
}
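
The efficiency gain described here comes from folding the stabilizer's global compensation vector into the inverse mapping of the zoom interpolation, so shifting and scaling share one resampling pass. A hypothetical sketch of that coordinate mapping (names and signature are illustrative, not from the paper):

def compensate_and_zoom_coords(u, v, scale, comp_dx, comp_dy):
    # Map an output-pixel coordinate (u, v) back to the source image,
    # applying the zoom factor and the compensation shift in one step;
    # the interpolator then samples the source at (x, y) once.
    x = u / scale + comp_dx
    y = v / scale + comp_dy
    return x, y
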

2007 (5)

Methods and techniques for intelligent navigation and manipulation for bomb disposal and rescue operations.
Beltrán-González, C., Gasteratos, A., Amanatiadis, A., Chrysostomou, D., Guzman, R., Tóth, A., Szollosi, L., Juhász, A., & Galambos, P.
In SSRR2007 - IEEE International Workshop on Safety, Security and Rescue Robotics Proceedings, 2007.

@inproceedings{
  title = {Methods and techniques for intelligent navigation and manipulation for bomb disposal and rescue operations},
  type = {inproceedings},
  year = {2007},
  keywords = {Hazardous,Rescue,Robotics,Teleoperation},
  abstract = {Handling a teleoperated robotic mechanism demands special skills and involves particular problems, especially for robots dealing with rescue operations or bomb disposal. In such cases, any loss of communications might produce unpredictable results, and both a bomb and a survivor need careful handling. In this paper we describe automatic methods and techniques developed on a multifunctional teleoperated robot. These are intended to assist both the robot and the human operator in accomplishing their mission of rescue or bomb disposal.},
  author = {Beltrán-González, C. and Gasteratos, A. and Amanatiadis, A. and Chrysostomou, D. and Guzman, R. and Tóth, A. and Szollosi, L. and Juhász, A. and Galambos, P.},
  doi = {10.1109/SSRR.2007.4381291},
  booktitle = {SSRR2007 - IEEE International Workshop on Safety, Security and Rescue Robotics Proceedings}
}
An integrated dynamic image stabilizer applied to zooming systems.
Amanatiadis, A., & Andreadis, I.
In Conference Record - IEEE Instrumentation and Measurement Technology Conference, 2007.

@inproceedings{
  title = {An integrated dynamic image stabilizer applied to zooming systems},
  type = {inproceedings},
  year = {2007},
  keywords = {Dynamic zooming stabilization system,Image stabilization,Image zooming},
  abstract = {In this paper, we propose a new integrated architecture for a dependent stabilization and zooming process. The stabilization system adapts its operational parameters from the current optical zooming status, and the stabilization parameters are subsequently introduced as an additional input to the digital zooming system for the interpolation process. With this approach the stabilization system is dynamic, with no permanent setup only for the non-zooming operation of the camera, making the whole architecture dependent and not strictly sequential. Additionally, the digital image stabilization system provides an input to the digital zooming system in order to carry out the image compensation and the image zooming simultaneously within the interpolation procedure. Experimental results showed that the proposed integrated architecture exhibits better performance compared to the existing conventional stabilization architecture when the zooming process is employed. Furthermore, the proposed system has lower computational complexity, minimizing the latency observed in conventional systems. © 2007 IEEE.},
  author = {Amanatiadis, A. and Andreadis, I.},
  booktitle = {Conference Record - IEEE Instrumentation and Measurement Technology Conference}
}
A rotational and translational image stabilization system for remotely operated robots.
Amanatiadis, A., Andreadis, I., Gasteratos, A., & Kyriakoulis, N.
In Proceedings of the 2007 IEEE International Workshop on Imaging Systems and Techniques, IST'07, 2007.

@inproceedings{
  title = {A rotational and translational image stabilization system for remotely operated robots},
  type = {inproceedings},
  year = {2007},
  keywords = {Image stabilization,Robot navigation,Visuo-inertial model},
  abstract = {Remotely operated robots equipped with on-board cameras, apart from providing video input to operators, perform optical measurements to assist their navigation as well. Such image processing algorithms require image sequences free of high-frequency unwanted movements in order to generate optimal results. Image stabilization is the process which removes the undesirable position fluctuations of a video sequence, thereby improving its visual quality. In this paper, we introduce the implementation of an image stabilization system that utilizes input from an on-board camera and a gyrosensor. The frame sequence is processed by an optic flow algorithm and the inertial data is processed by a discrete Kalman filter. The compensation is performed using two servo motors for the pan and tilt movements and frame shifting for the vertical and horizontal movements. Experimental results of the robot head have shown finely stabilized image sequences and a system capable of processing 320 × 240 pixel image sequences at approximately 10 frames/sec, with a maximum acceleration of 4 deg/sec². © 2007 IEEE.},
  author = {Amanatiadis, A. and Andreadis, I. and Gasteratos, A. and Kyriakoulis, N.},
  doi = {10.1109/ist.2007.379602},
  booktitle = {Proceedings of the 2007 IEEE International Workshop on Imaging Systems and Techniques, IST'07}
}
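
The discrete Kalman filter applied to the gyrosensor stream can be illustrated with a scalar random-walk filter. The noise variances q and r below are assumed values for the sketch, not the paper's tuning.

def kalman_1d(measurements, q=1e-3, r=1e-1):
    # Scalar discrete Kalman filter: random-walk state model, one noisy
    # measurement per step; returns the smoothed motion estimates.
    x, p, out = 0.0, 1.0, []
    for z in measurements:
        p += q                  # predict: state variance grows by q
        k = p / (p + r)         # Kalman gain
        x += k * (z - x)        # update with the new measurement
        p *= (1.0 - k)
        out.append(x)
    return out
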
A log-polar interpolation applied to image scaling.
Amanatiadis, A., Andreadis, I., & Gasteratos, A.
In Proceedings of the 2007 IEEE International Workshop on Imaging Systems and Techniques, IST'07, 2007.

@inproceedings{
  title = {A log-polar interpolation applied to image scaling},
  type = {inproceedings},
  year = {2007},
  keywords = {Digital image scaling,Log-polar interpolation,Log-polar neighbor model,Zooming},
  abstract = {This paper proposes a bio-inspired interpolation algorithm suitable for image scaling. A log-polar neighbor model is adopted, applying larger weights to pixels at the center of the interpolation region and logarithmically decreasing weights to pixels away from the center. The interpolation is performed in the Cartesian plane without requiring the full transformation of the image to the log-polar plane. Experiments show that, in both visual comparisons and quantitative analysis, the results extracted by the proposed log-polar neighbor model are better than those extracted from pixel repetition, bilinear and bicubic interpolation. © 2007 IEEE.},
  author = {Amanatiadis, A. and Andreadis, I. and Gasteratos, A.},
  doi = {10.1109/ist.2007.379610},
  booktitle = {Proceedings of the 2007 IEEE International Workshop on Imaging Systems and Techniques, IST'07}
}
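
The defining feature of the log-polar neighbor model, large weights at the interpolation centre decaying logarithmically with distance, can be sketched as below. The exact weight profile is an assumption for illustration; only the qualitative log-polar falloff is taken from the abstract.

import numpy as np

def log_polar_weights(offsets, rho0=0.5):
    # offsets: (N, 2) pixel offsets from the interpolation centre.
    # Weights fall off logarithmically with radial distance, mimicking
    # retina-like log-polar sampling; normalised to sum to one.
    d = np.linalg.norm(np.asarray(offsets, dtype=np.float64), axis=1)
    w = 1.0 / np.log2(2.0 + d / rho0)
    return w / w.sum()
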
Fuzzy area-based image scaling.
Amanatiadis, A., Andreadis, I., & Konstantinidis, K.
In Conference Record - IEEE Instrumentation and Measurement Technology Conference, 2007.

@inproceedings{
  title = {Fuzzy area-based image scaling},
  type = {inproceedings},
  year = {2007},
  keywords = {Digital image scaling,Fuzzy area-based interpolation,Fuzzy zooming},
  abstract = {In this paper, we propose an interpolation scheme for performing image scaling by utilizing a dynamic mask combined with a sophisticated neighborhood averaging fuzzy algorithm. The functions that contribute to the final interpolated image are the areas of the input pixels, overlapped by a dynamic mask, and the differences in intensity of the input pixels. Simple fuzzy if-then rules for these two functions are presented in order to carry out the interpolation task. Simulation results have shown a fine high-frequency response and low interpolation error in comparison to other widely used algorithms. The interpolation can be applied to both gray-scale and color images, for any scaling factor. © 2007 IEEE.},
  author = {Amanatiadis, A. and Andreadis, I. and Konstantinidis, K.},
  doi = {10.1109/imtc.2007.379084},
  booktitle = {Conference Record - IEEE Instrumentation and Measurement Technology Conference}
}

2006 (2)

Comparison of data fusion techniques for robot navigation.
Kyriakoulis, N., Gasteratos, A., & Amanatiadis, A.
Lecture Notes in Computer Science (LNAI), volume 3955, 2006.

@book{
  title = {Comparison of data fusion techniques for robot navigation},
  type = {book},
  year = {2006},
  source = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  volume = {3955 LNAI},
  abstract = {This paper proposes and compares several data fusion techniques for robot navigation. The fusion techniques investigated here are several topologies of the Kalman filter. The simulated problem is the navigation of a robot carrying two sensors, one Global Positioning System (GPS) and one Inertial Navigation System (INS). For each of the above topologies, the statistical error and its mean value, variance and standard deviation were examined. © Springer-Verlag Berlin Heidelberg 2006.},
  author = {Kyriakoulis, N. and Gasteratos, A. and Amanatiadis, A.},
  doi = {10.1007/11752912_65}
}
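
The single-step core shared by the Kalman topologies compared here is an inverse-variance blend of the GPS and INS estimates, where the sensor with the smaller variance dominates. A minimal scalar sketch (illustrative only):

def fuse(gps, ins, var_gps, var_ins):
    # Inverse-variance weighting: as var_gps -> 0 the GPS estimate is
    # trusted fully, and vice versa for the INS estimate.
    w_gps = var_ins / (var_gps + var_ins)
    return w_gps * gps + (1.0 - w_gps) * ins
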
Defining the main factors of quality of service in mobile telephony.
Amanatiadis, A., Drakatos, K., Tsironis, L., & Moustakis, V.
In Second International Conference on Wireless and Mobile Communications, ICWMC 2006, 2006.

@inproceedings{
  title = {Defining the main factors of quality of service in mobile telephony},
  type = {inproceedings},
  year = {2006},
  abstract = {Due to recent technological advances, many advanced mobile telephony services have been introduced. Thus, the proper selection of a service among the various alternatives at hand is key to obtaining the best one from the customer's point of view. The purpose of this study is to analyze the mobile telephony Quality of Service factors from the viewpoint of service selection. This study explores the factors to be considered by customers when choosing a telephony service. A hierarchical framework supporting mobile telephony Quality of Service is presented, composed of a novel hierarchical structure of criteria and sub-criteria. We first describe the basics of mobile telephony technology. The terms Quality and Quality of Service are discussed from the viewpoint of end-user (customer) satisfaction. Finally, we analyse each of the factors in depth and conclude with a discussion of our findings. © 2006 IEEE.},
  author = {Amanatiadis, A. and Drakatos, K. and Tsironis, L. and Moustakis, V.},
  doi = {10.1109/ICWMC.2006.37},
  booktitle = {Second International Conference on Wireless and Mobile Communications, ICWMC 2006}
}

2005 (1)

Digital image scaling.
Andreadis, I., & Amanatiadis, A.
In Conference Record - IEEE Instrumentation and Measurement Technology Conference, volume 3, 2005.

@inproceedings{
  title = {Digital image scaling},
  type = {inproceedings},
  year = {2005},
  keywords = {Area pixel interpolation,Real-time image scaling},
  volume = {3},
  abstract = {The proposed scaling algorithm outperforms other standard and widely used scaling techniques. The algorithm uses a mask of at most four pixels and calculates the final luminosity of each pixel by combining two factors: the percentage of area that the mask covers from each source pixel and the difference in luminosity between the source pixels. The interpolation is capable of scaling both grey-scale and color images of any resolution by any scaling factor. Its key characteristics and low complexity make the interpolation very fast and capable of real-time implementation. Performance results on a variety of standard tests are presented and compared to other scaling algorithms. © 2005 IEEE.},
  author = {Andreadis, I. and Amanatiadis, A.},
  booktitle = {Conference Record - IEEE Instrumentation and Measurement Technology Conference}
}