Publication list (generated by bibbase.org)

To use this list on your own web site, embed it in an existing web page by copying and pasting any of the following snippets.

JavaScript (easiest)

<script src="https://bibbase.org/service/mendeley/aba9653c-d139-3f95-aad8-969c487ed2f3/group/13ef799e-a79c-38bd-a5cc-187330c349ae?jsonp=1"></script>

PHP

<?php
$contents = file_get_contents("https://bibbase.org/service/mendeley/aba9653c-d139-3f95-aad8-969c487ed2f3/group/13ef799e-a79c-38bd-a5cc-187330c349ae?jsonp=1");
print_r($contents);
?>

iFrame (not recommended)

<iframe src="https://bibbase.org/service/mendeley/aba9653c-d139-3f95-aad8-969c487ed2f3/group/13ef799e-a79c-38bd-a5cc-187330c349ae?jsonp=1"></iframe>
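
The PHP snippet above fetches the rendered list server-side. If your site runs on a Python backend instead, the same approach looks like the minimal sketch below; it assumes the third-party requests package and is not one of BibBase's documented snippets.

# Minimal sketch: server-side fetch of the rendered list, analogous to the
# PHP snippet above. Assumes the third-party "requests" package is installed;
# this is not one of BibBase's documented snippets.
import requests

URL = ("https://bibbase.org/service/mendeley/"
       "aba9653c-d139-3f95-aad8-969c487ed2f3/group/"
       "13ef799e-a79c-38bd-a5cc-187330c349ae?jsonp=1")

response = requests.get(URL, timeout=30)
response.raise_for_status()          # fail loudly on HTTP errors
print(response.text)                 # same payload the PHP example prints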

For more details, see the documentation.

2024 (1)

Instance Selection Framework for Alzheimer’s Disease Classification Using Multiple Regions of Interest and Atlas Integration. Castro-Silva, J. A.; Moreno-García, M.; Guachi-Guachi, L.; and Peluffo-Ordóñez, D. H. In Proceedings of the 13th International Conference on Pattern Recognition Applications and Methods - ICPRAM, pages 453-460, 2024. SciTePress.
\n\n\n\n \n \n \"InstanceWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Instance Selection Framework for Alzheimer’s Disease Classification Using Multiple Regions of Interest and Atlas Integration},\n type = {inproceedings},\n year = {2024},\n pages = {453-460},\n websites = {https://www.scitepress.org/Link.aspx?doi=10.5220/0012469600003654},\n publisher = {SciTePress},\n institution = {INSTICC},\n id = {8b26ce48-e2d2-33db-a442-02ece8e046e9},\n created = {2024-03-06T14:15:39.638Z},\n file_attached = {false},\n profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},\n group_id = {13ef799e-a79c-38bd-a5cc-187330c349ae},\n last_modified = {2024-03-06T14:15:39.638Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {icpram24},\n source_type = {conference},\n private_publication = {false},\n abstract = {Optimal selection of informative instances from a dataset is critical for constructing accurate predictive models. As databases expand, leveraging instance selection techniques becomes imperative to condense data into a more manageable size. This research unveils a novel framework designed to strategically identify and choose the most informative 2D brain image slices for Alzheimer’s disease classification. Such a framework integrates annotations from multiple regions of interest across multiple atlases. The proposed framework consists of six core components: 1) Atlas merging for ROI annotation and hemisphere separation. 2) Image preprocessing to extract informative slices. 3) Dataset construction to prevent data leakage, select subjects, and split data. 4) Data generation for memory-efficient batches. 5) Model construction for diverse classification training and testing. 6) Weighted ensemble for combining predictions from multiple models with a single learning algorithm. Our instanc e selection framework was applied to construct Transformer-based classification models, demonstrating an overall accuracy of approximately 98.33% in distinguishing between Cognitively Normal and Alzheimer’s cases at the subject level. It exhibited enhancements of 3.68%, 3.01%, 3.62% for sagittal, coronal, and axial planes respectively in comparison with the percentile technique.},\n bibtype = {inproceedings},\n author = {Castro-Silva., Juan A. and Moreno-García., Maria and Guachi-Guachi., Lorena and Peluffo-Ordóñez., Diego H.},\n doi = {10.5220/0012469600003654},\n booktitle = {Proceedings of the 13th International Conference on Pattern Recognition Applications and Methods - ICPRAM}\n}
\n
\n\n\n

Optimal selection of informative instances from a dataset is critical for constructing accurate predictive models. As databases expand, leveraging instance selection techniques becomes imperative to condense data into a more manageable size. This research unveils a novel framework designed to strategically identify and choose the most informative 2D brain image slices for Alzheimer’s disease classification. Such a framework integrates annotations from multiple regions of interest across multiple atlases. The proposed framework consists of six core components: 1) Atlas merging for ROI annotation and hemisphere separation. 2) Image preprocessing to extract informative slices. 3) Dataset construction to prevent data leakage, select subjects, and split data. 4) Data generation for memory-efficient batches. 5) Model construction for diverse classification training and testing. 6) Weighted ensemble for combining predictions from multiple models with a single learning algorithm. Our instance selection framework was applied to construct Transformer-based classification models, demonstrating an overall accuracy of approximately 98.33% in distinguishing between Cognitively Normal and Alzheimer’s cases at the subject level. It exhibited enhancements of 3.68%, 3.01%, and 3.62% for the sagittal, coronal, and axial planes, respectively, in comparison with the percentile technique.
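
Component 6, the weighted ensemble, reduces to a few lines of generic code; a soft-voting sketch follows. The model probabilities and weights below are hypothetical illustrations, not values from the paper.

# Generic weighted soft-voting ensemble, as a sketch of component 6 above.
# Model probabilities and weights are hypothetical, not the paper's values.
import numpy as np

def weighted_ensemble(probs, weights):
    """Combine per-model class probabilities (models x samples x classes)
    using one weight per model, then return the winning class per sample."""
    weights = np.asarray(weights, dtype=float)
    weights = weights / weights.sum()                # normalize to sum to 1
    combined = np.tensordot(weights, probs, axes=1)  # samples x classes
    return combined.argmax(axis=1)

# Three models scoring two samples on two classes (e.g. CN vs. AD):
probs = np.array([[[0.7, 0.3], [0.4, 0.6]],
                  [[0.6, 0.4], [0.3, 0.7]],
                  [[0.8, 0.2], [0.5, 0.5]]])
print(weighted_ensemble(probs, weights=[0.5, 0.3, 0.2]))  # prints [0 1]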

2023 (3)

Neural Networks on Noninvasive Electrocardiographic Imaging Reconstructions: Preliminary Results. Mayorca-Torres, D.; León-Salas, A. J.; and Peluffo-Ordoñez, D. H. In Botto-Tobar, M.; Gómez, O. S.; Rosero Miranda, R.; Díaz Cadena, A.; and Luna-Encalada, W., editors, Trends in Artificial Intelligence and Computer Engineering, pages 55-63, 2023. Springer Nature Switzerland.
\n\n\n\n \n \n \"NeuralWebsite\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Neural Networks on Noninvasive Electrocardiographic Imaging Reconstructions: Preliminary Results},\n type = {inproceedings},\n year = {2023},\n pages = {55-63},\n websites = {https://link.springer.com/chapter/10.1007/978-3-031-25942-5_5},\n publisher = {Springer Nature Switzerland},\n city = {Cham},\n id = {c3e9ed77-6411-3612-9b28-6977afe2ef3d},\n created = {2023-02-13T23:39:20.171Z},\n file_attached = {false},\n profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},\n group_id = {13ef799e-a79c-38bd-a5cc-187330c349ae},\n last_modified = {2023-02-13T23:39:20.171Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {10.1007/978-3-031-25942-5_5},\n source_type = {inproceedings},\n private_publication = {false},\n abstract = {In the reverse electrocardiography (ECG) problem, the objective is to reconstruct the heart's electrical activity from a set of body surface potentials by solving the direct model and the geometry of the torso. Over the years, researchers have used various approaches to solve this problem, from direct, iterative, probabilistic, and those based on deep learning. The interest of the latter, among the wide range of techniques, is because the complexity of the problem can be significantly reduced while increasing the precision of the estimation. In this article, we evaluate the performance of a deep learning-based neural network compared to the Tikhonov method of zero order (ZOT), first (FOT), and second (SOT). Preliminary results show an improvement in performance over real data when Pearson's correlation coefficient (CC) and (RMSE) are calculated. The CC's mean value and standard deviation for the proposed method were 0.960 (0.065), well above ZOT, which was 0.864 (0.047).},\n bibtype = {inproceedings},\n author = {Mayorca-Torres, Dagoberto and León-Salas, Alejandro José and Peluffo-Ordoñez, Diego Hernán},\n editor = {Botto-Tobar, Miguel and Gómez, Omar S and Rosero Miranda, Raul and Díaz Cadena, Angela and Luna-Encalada, Washington},\n booktitle = {Trends in Artificial Intelligence and Computer Engineering}\n}
\n
\n\n\n

In the reverse electrocardiography (ECG) problem, the objective is to reconstruct the heart's electrical activity from a set of body surface potentials by solving the direct model and the geometry of the torso. Over the years, researchers have used various approaches to solve this problem: direct, iterative, probabilistic, and deep-learning-based. The interest in the latter, among the wide range of techniques, is that the complexity of the problem can be significantly reduced while increasing the precision of the estimation. In this article, we evaluate the performance of a deep learning-based neural network compared to the Tikhonov method of zero order (ZOT), first order (FOT), and second order (SOT). Preliminary results show an improvement in performance over real data when Pearson's correlation coefficient (CC) and the root mean square error (RMSE) are calculated. The CC's mean value and standard deviation for the proposed method were 0.960 (0.065), well above ZOT, which was 0.864 (0.047).
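
For context on the baselines compared above: the Tikhonov orders differ only in the regularization operator L. A standard textbook formulation (not quoted from the paper) is

\hat{\mathbf{x}} = \arg\min_{\mathbf{x}} \; \|A\mathbf{x}-\mathbf{b}\|_2^2 + \lambda^2 \|L\mathbf{x}\|_2^2
                 = (A^{\top}A + \lambda^2 L^{\top}L)^{-1} A^{\top}\mathbf{b},

where A is the forward (torso) transfer matrix, b the body-surface potentials, and L is the identity for zero order (ZOT), a discrete gradient for first order (FOT), or a discrete Laplacian for second order (SOT).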

A Comparison of Monocular Visual SLAM and Visual Odometry Methods Applied to 3D Reconstruction. Herrera-Granda, E. P.; Torres-Cantero, J. C.; Rosales, A.; and Peluffo-Ordóñez, D. H. Applied Sciences, 13(15). 2023.
\n\n\n\n \n \n \"AWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {A Comparison of Monocular Visual SLAM and Visual Odometry Methods Applied to 3D Reconstruction},\n type = {article},\n year = {2023},\n volume = {13},\n websites = {https://www.mdpi.com/2076-3417/13/15/8837},\n id = {7e0288b9-a5e9-3707-a4e5-2a961cd520c7},\n created = {2023-08-02T21:24:17.244Z},\n file_attached = {false},\n profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},\n group_id = {13ef799e-a79c-38bd-a5cc-187330c349ae},\n last_modified = {2023-08-02T21:24:17.244Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {app13158837},\n source_type = {article},\n private_publication = {false},\n abstract = {Pure monocular 3D reconstruction is a complex problem that has attracted the research community&rsquo;s interest due to the affordability and availability of RGB sensors. SLAM, VO, and SFM are disciplines formulated to solve the 3D reconstruction problem and estimate the camera&rsquo;s ego-motion; so, many methods have been proposed. However, most of these methods have not been evaluated on large datasets and under various motion patterns, have not been tested under the same metrics, and most of them have not been evaluated following a taxonomy, making their comparison and selection difficult. In this research, we performed a comparison of ten publicly available SLAM and VO methods following a taxonomy, including one method for each category of the primary taxonomy, three machine-learning-based methods, and two updates of the best methods to identify the advantages and limitations of each category of the taxonomy and test whether the addition of machine learning or updates on those methods improved them significantly. Thus, we evaluated each algorithm using the TUM-Mono dataset and benchmark, and we performed an inferential statistical analysis to identify the significant differences through its metrics. The results determined that the sparse-direct methods significantly outperformed the rest of the taxonomy, and fusing them with machine learning techniques significantly enhanced the geometric-based methods&rsquo; performance from different perspectives.},\n bibtype = {article},\n author = {Herrera-Granda, Erick P and Torres-Cantero, Juan C and Rosales, Andrés and Peluffo-Ordóñez, Diego H},\n doi = {10.3390/app13158837},\n journal = {Applied Sciences},\n number = {15}\n}
\n
\n\n\n

Pure monocular 3D reconstruction is a complex problem that has attracted the research community’s interest due to the affordability and availability of RGB sensors. SLAM, VO, and SFM are disciplines formulated to solve the 3D reconstruction problem and estimate the camera’s ego-motion, so many methods have been proposed. However, most of these methods have not been evaluated on large datasets and under various motion patterns, have not been tested under the same metrics, and most of them have not been evaluated following a taxonomy, making their comparison and selection difficult. In this research, we performed a comparison of ten publicly available SLAM and VO methods following a taxonomy, including one method for each category of the primary taxonomy, three machine-learning-based methods, and two updates of the best methods, to identify the advantages and limitations of each category of the taxonomy and to test whether the addition of machine learning or updates to those methods improved them significantly. Thus, we evaluated each algorithm using the TUM-Mono dataset and benchmark, and we performed an inferential statistical analysis to identify the significant differences through its metrics. The results determined that the sparse-direct methods significantly outperformed the rest of the taxonomy, and fusing them with machine learning techniques significantly enhanced the geometric-based methods’ performance from different perspectives.

A comparative study of Machine Learning-based classification of Tomato fungal diseases: Application of GLCM texture features. Nyasulu, C.; Diattara, A.; Traore, A.; Ba, C.; Diedhiou, P. M.; Sy, Y.; Raki, H.; and Peluffo-Ordóñez, D. H. Heliyon. 2023.
\n\n\n\n \n \n \"AWebsite\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {A comparative study of Machine Learning-based classification of Tomato fungal diseases: Application of GLCM texture features},\n type = {article},\n year = {2023},\n websites = {https://www.sciencedirect.com/science/article/pii/S2405844023089053},\n publisher = {Elsevier},\n id = {eaab42b2-017b-3879-a8b3-2956cdb35178},\n created = {2023-11-11T22:11:22.533Z},\n file_attached = {false},\n profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},\n group_id = {13ef799e-a79c-38bd-a5cc-187330c349ae},\n last_modified = {2023-11-11T22:11:22.533Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {nyasulu2023comparative},\n source_type = {article},\n private_publication = {false},\n abstract = {Globally, agriculture remains an important source of food and economic development. Due to various plant diseases, farmers continue to suffer huge yield losses in both quality and quantity. In this study, we explored the potential of using Artificial Neural Networks, K-Nearest Neighbors, Random Forest, and Support Vector Machine to classify tomato fungal leaf diseases: Alternaria, Curvularia, Helminthosporium, and Lasiodiplodi based on Gray Level Co-occurrence Matrix texture features. Small differences between symptoms of these diseases make it difficult to use the naked eye to obtain better results in detecting and distinguishing these diseases. The Artificial Neural Network outperformed other classifiers with an overall accuracy of 94% and average scores of 93.6% for Precision, 93.8% for Recall, and 93.8% for F1-score. Generally, the models confused samples originally belonging to Helminthosporium with Curvularia. The extracted texture features show great potential to classify the different tomato leaf fungal diseases. The results of this study show that texture characteristics of the Gray Level Co-occurrence Matrix play a critical role in the establishment of tomato leaf disease classification systems and can facilitate the implementation of preventive measures by farmers, resulting in enhanced yield quality and quantity.},\n bibtype = {article},\n author = {Nyasulu, Chimango and Diattara, Awa and Traore, Assitan and Ba, Cheikh and Diedhiou, Papa Madiallacké and Sy, Yakhya and Raki, Hind and Peluffo-Ordóñez, Diego Hernán},\n journal = {Heliyon}\n}
\n
\n\n\n

Globally, agriculture remains an important source of food and economic development. Due to various plant diseases, farmers continue to suffer huge yield losses in both quality and quantity. In this study, we explored the potential of using Artificial Neural Networks, K-Nearest Neighbors, Random Forest, and Support Vector Machine to classify tomato fungal leaf diseases: Alternaria, Curvularia, Helminthosporium, and Lasiodiplodi, based on Gray Level Co-occurrence Matrix texture features. Small differences between symptoms of these diseases make it difficult to use the naked eye to obtain better results in detecting and distinguishing these diseases. The Artificial Neural Network outperformed other classifiers with an overall accuracy of 94% and average scores of 93.6% for Precision, 93.8% for Recall, and 93.8% for F1-score. Generally, the models confused samples originally belonging to Helminthosporium with Curvularia. The extracted texture features show great potential to classify the different tomato leaf fungal diseases. The results of this study show that texture characteristics of the Gray Level Co-occurrence Matrix play a critical role in the establishment of tomato leaf disease classification systems and can facilitate the implementation of preventive measures by farmers, resulting in enhanced yield quality and quantity.
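
As a sketch of the feature-extraction step named in the title, GLCM texture features can be computed with scikit-image; the distances, angles, and properties chosen below are illustrative assumptions, not the paper's exact configuration.

# Illustrative GLCM texture-feature extraction with scikit-image; the
# distances/angles/properties are assumptions, not the paper's settings.
import numpy as np
from skimage.feature import graycomatrix, graycoprops

def glcm_features(gray_img):
    """Return a small GLCM feature vector for an 8-bit grayscale leaf image."""
    glcm = graycomatrix(gray_img,
                        distances=[1], angles=[0, np.pi / 2],
                        levels=256, symmetric=True, normed=True)
    props = ("contrast", "homogeneity", "energy", "correlation")
    return np.hstack([graycoprops(glcm, p).ravel() for p in props])

rng = np.random.default_rng(0)
leaf = rng.integers(0, 256, size=(64, 64), dtype=np.uint8)  # stand-in image
print(glcm_features(leaf))  # 8 values: 4 properties x 2 angles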

2022 (3)

Instance Selection on CNNs for Alzheimer’s Disease Classification from MRI. Castro-Silva, J.; Moreno-García, M.; Guachi-Guachi, L.; and Peluffo-Ordóñez, D. In Proceedings of the 11th International Conference on Pattern Recognition Applications and Methods - ICPRAM, pages 330-337, 2022. SciTePress.
\n\n\n\n \n \n \"InstanceWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Instance Selection on CNNs for Alzheimer’s Disease Classification from MRI},\n type = {inproceedings},\n year = {2022},\n pages = {330-337},\n websites = {https://www.scitepress.org/Link.aspx?doi=10.5220/0010900100003122},\n publisher = {SciTePress},\n institution = {INSTICC},\n id = {13440cc7-28e7-3171-81b6-6c68fa5f68ea},\n created = {2022-02-22T01:00:12.746Z},\n file_attached = {false},\n profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},\n group_id = {13ef799e-a79c-38bd-a5cc-187330c349ae},\n last_modified = {2022-02-22T01:00:12.746Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {icpram22},\n source_type = {conference},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Castro-Silva., J and Moreno-García., M and Guachi-Guachi., Lorena and Peluffo-Ordóñez., D},\n doi = {10.5220/0010900100003122},\n booktitle = {Proceedings of the 11th International Conference on Pattern Recognition Applications and Methods - ICPRAM,}\n}
\n
\n\n\n\n
\n\n\n

A Computer Vision Model to Identify the Incorrect Use of Face Masks for COVID-19 Awareness. Crespo, F.; Crespo, A.; Sierra-Martínez, L. M.; Peluffo-Ordóñez, D. H.; and Morocho-Cayamcela, M. E. Applied Sciences, 12(14). 2022.
\n\n\n\n \n \n \"AWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {A Computer Vision Model to Identify the Incorrect Use of Face Masks for COVID-19 Awareness},\n type = {article},\n year = {2022},\n volume = {12},\n websites = {https://www.mdpi.com/2076-3417/12/14/6924},\n id = {dcb5d6d6-83a4-3f4b-8d26-bb6c2c9f1b36},\n created = {2022-07-24T01:29:09.060Z},\n file_attached = {false},\n profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},\n group_id = {13ef799e-a79c-38bd-a5cc-187330c349ae},\n last_modified = {2022-07-24T01:29:09.060Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {app12146924},\n source_type = {article},\n private_publication = {false},\n abstract = {Face mask detection has become a great challenge in computer vision, demanding the coalition of technology with COVID-19 awareness. Researchers have proposed deep learning models to detect the use of face masks. However, the incorrect use of a face mask can be as harmful as not wearing any protection at all. In this paper, we propose a compound convolutional neural network (CNN) architecture based on two computer vision tasks: object localization to discover faces in images/videos, followed by an image classification CNN to categorize the faces and show if someone is using a face mask correctly, incorrectly, or not at all. The first CNN is built upon RetinaFace, a model to detect faces in images, whereas the second CNN uses a ResNet-18 architecture as a classification backbone. Our model enables an accurate identification of people who are not correctly following the COVID-19 healthcare recommendations on face mask use. To enable further global use of our technology, we have released both the dataset used to train the classification model and our proposed computer vision pipeline to the public, and optimized it for embedded systems deployment.},\n bibtype = {article},\n author = {Crespo, Fabricio and Crespo, Anthony and Sierra-Martínez, Luz Marina and Peluffo-Ordóñez, Diego Hernán and Morocho-Cayamcela, Manuel Eugenio},\n doi = {10.3390/app12146924},\n journal = {Applied Sciences},\n number = {14}\n}
\n
\n\n\n
\n Face mask detection has become a great challenge in computer vision, demanding the coalition of technology with COVID-19 awareness. Researchers have proposed deep learning models to detect the use of face masks. However, the incorrect use of a face mask can be as harmful as not wearing any protection at all. In this paper, we propose a compound convolutional neural network (CNN) architecture based on two computer vision tasks: object localization to discover faces in images/videos, followed by an image classification CNN to categorize the faces and show if someone is using a face mask correctly, incorrectly, or not at all. The first CNN is built upon RetinaFace, a model to detect faces in images, whereas the second CNN uses a ResNet-18 architecture as a classification backbone. Our model enables an accurate identification of people who are not correctly following the COVID-19 healthcare recommendations on face mask use. To enable further global use of our technology, we have released both the dataset used to train the classification model and our proposed computer vision pipeline to the public, and optimized it for embedded systems deployment.\n
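
The second stage of the two-stage design described above (a ResNet-18 backbone with a three-way correct/incorrect/no-mask head) can be sketched with torchvision. This is an illustration under stated assumptions, not the authors' released code; the RetinaFace detector stage is abstracted away.

# Sketch of the second stage only: a ResNet-18 backbone with a 3-class head
# (mask correct / incorrect / none). Illustrative, not the authors' code;
# the first stage (RetinaFace) is assumed to supply cropped face tensors.
import torch
import torch.nn as nn
from torchvision import models

classifier = models.resnet18(weights=None)               # backbone
classifier.fc = nn.Linear(classifier.fc.in_features, 3)  # 3-way head

faces = torch.randn(4, 3, 224, 224)   # stand-in for detected face crops
logits = classifier(faces)
print(logits.argmax(dim=1))           # predicted category per face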

Crop Classification Using Deep Learning: A Quick Comparative Study of Modern Approaches. Raki, H.; González-Vergara, J.; Aalaila, Y.; Elhamdi, M.; Bamansour, S.; Guachi-Guachi, L.; and Peluffo-Ordoñez, D. H. In Florez, H.; and Gomez, H., editors, Applied Informatics, pages 31-44, 2022. Springer International Publishing.
\n\n\n\n \n \n \"CropWebsite\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Crop Classification Using Deep Learning: A Quick Comparative Study of Modern Approaches},\n type = {inproceedings},\n year = {2022},\n pages = {31-44},\n websites = {https://link.springer.com/chapter/10.1007/978-3-031-19647-8_3},\n publisher = {Springer International Publishing},\n city = {Cham},\n id = {4c4e40eb-f5ea-3cae-820c-4b66f8d39ea5},\n created = {2022-10-15T21:45:02.144Z},\n file_attached = {false},\n profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},\n group_id = {13ef799e-a79c-38bd-a5cc-187330c349ae},\n last_modified = {2022-10-15T21:45:02.144Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {10.1007/978-3-031-19647-8_3},\n source_type = {inproceedings},\n private_publication = {false},\n abstract = {Automatic crop classification using new technologies is recognized as one of the most important assets in today's smart farming improvement. Investments in technology and innovation are key issues for shaping agricultural productivity as well as the inclusiveness and sustainability of the global agricultural transformation. Digital image processing (DIP) has been widely adopted in this field, by merging Unmanned Aerial Vehicle (UAV) based remote sensing and deep learning (DL) as a powerful tool for crop classification. Despite the wide range of alternatives, the proper selection of a DL approach is still an open and challenging issue. In this work, we carry out an exhaustive performance evaluation of three remarkable and lightweight DL approaches, namely: Visual Geometry Group (VGG), Residual Neural Network (ResNet) and Inception V3, tested on high resolution agriculture crop images dataset. Experimental results show that InceptionV3 outperforms VGG and ResNet in terms of precision (0,92), accuracy (0,97), recall (0,91), AUC (0,98), PCR (0,97), and F1 (0,91).},\n bibtype = {inproceedings},\n author = {Raki, Hind and González-Vergara, Juan and Aalaila, Yahya and Elhamdi, Mouad and Bamansour, Sami and Guachi-Guachi, Lorena and Peluffo-Ordoñez, Diego H},\n editor = {Florez, Hector and Gomez, Henry},\n booktitle = {Applied Informatics}\n}
\n
\n\n\n

Automatic crop classification using new technologies is recognized as one of the most important assets in today's smart farming improvement. Investments in technology and innovation are key issues for shaping agricultural productivity as well as the inclusiveness and sustainability of the global agricultural transformation. Digital image processing (DIP) has been widely adopted in this field, merging Unmanned Aerial Vehicle (UAV) based remote sensing and deep learning (DL) as a powerful tool for crop classification. Despite the wide range of alternatives, the proper selection of a DL approach is still an open and challenging issue. In this work, we carry out an exhaustive performance evaluation of three remarkable and lightweight DL approaches, namely Visual Geometry Group (VGG), Residual Neural Network (ResNet), and Inception V3, tested on a high-resolution agricultural crop image dataset. Experimental results show that InceptionV3 outperforms VGG and ResNet in terms of precision (0.92), accuracy (0.97), recall (0.91), AUC (0.98), PCR (0.97), and F1 (0.91).

2021 (2)

Enhanced Convolutional-Neural-Network Architecture for Crop Classification. Moreno-Revelo, M. Y.; Guachi-Guachi, L.; Gómez-Mendoza, J. B.; Revelo-Fuelagán, J.; and Peluffo-Ordóñez, D. H. Applied Sciences, pages 1-23. 2021.
\n\n\n\n \n \n \"EnhancedPaper\n  \n \n \n \"EnhancedWebsite\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Enhanced Convolutional-Neural-Network Architecture for Crop Classification},\n type = {article},\n year = {2021},\n pages = {1-23},\n websites = {https://www.mdpi.com/2076-3417/11/9/4292},\n id = {9c641253-bae2-3f7d-a33c-080a28e31d9b},\n created = {2022-02-02T05:07:51.733Z},\n file_attached = {true},\n profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},\n group_id = {13ef799e-a79c-38bd-a5cc-187330c349ae},\n last_modified = {2022-02-02T05:07:55.296Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Automatic crop identification and monitoring is a key element in enhancing food production processes as well as diminishing the related environmental impact. Although several efficient deep learning techniques have emerged in the field of multispectral imagery analysis, the crop classification problem still needs more accurate solutions. This work introduces a competitive methodology for crop classification from multispectral satellite imagery mainly using an enhanced 2D convolutional neural network (2D-CNN) designed at a smaller-scale architecture, as well as a novel post-processing step. The proposed methodology contains four steps: image stacking, patch extraction, classification model design (based on a 2D-CNN architecture), and post-processing. First, the images are stacked to increase the number of features. Second, the input images are split into patches and fed into the 2D-CNN model. Then, the 2D-CNN model is constructed within a small-scale framework, and properly trained to recognize 10 different types of crops. Finally, a post-processing step is performed in order to reduce the classification error caused by lower-spatial-resolution images. Experiments were carried over the so-named Campo Verde database, which consists of a set of satellite images captured by Landsat and Sentinel satellites from the municipality of Campo Verde, Brazil. In contrast to the maximum accuracy values reached by remarkable works reported in the literature (amounting to an overall accuracy of about 81%, a f1 score of 75.89%, and average accuracy of 73.35%), the proposed methodology achieves a competitive overall accuracy of 81.20%, a f1 score of 75.89%, and an average accuracy of 88.72% when classifying 10 different crops, while ensuring an adequate trade-off between the number of multiply-accumulate operations (MACs) and accuracy. Furthermore, given its ability to effectively classify patches from two image sequences, this methodology may result appealing for other real-world applications, such as the classification of urban materials.},\n bibtype = {article},\n author = {Moreno-revelo, Mónica Y and Guachi-guachi, Lorena and Gómez-mendoza, Juan Bernardo and Revelo-fuelagán, Javier and Peluffo-ordóñez, Diego H},\n journal = {Applied Sciences}\n}
\n
\n\n\n

Automatic crop identification and monitoring is a key element in enhancing food production processes as well as diminishing the related environmental impact. Although several efficient deep learning techniques have emerged in the field of multispectral imagery analysis, the crop classification problem still needs more accurate solutions. This work introduces a competitive methodology for crop classification from multispectral satellite imagery, mainly using an enhanced 2D convolutional neural network (2D-CNN) designed at a smaller-scale architecture, as well as a novel post-processing step. The proposed methodology contains four steps: image stacking, patch extraction, classification model design (based on a 2D-CNN architecture), and post-processing. First, the images are stacked to increase the number of features. Second, the input images are split into patches and fed into the 2D-CNN model. Then, the 2D-CNN model is constructed within a small-scale framework and properly trained to recognize 10 different types of crops. Finally, a post-processing step is performed in order to reduce the classification error caused by lower-spatial-resolution images. Experiments were carried out over the so-named Campo Verde database, which consists of a set of satellite images captured by Landsat and Sentinel satellites from the municipality of Campo Verde, Brazil. In contrast to the maximum accuracy values reached by remarkable works reported in the literature (amounting to an overall accuracy of about 81%, an F1 score of 75.89%, and an average accuracy of 73.35%), the proposed methodology achieves a competitive overall accuracy of 81.20%, an F1 score of 75.89%, and an average accuracy of 88.72% when classifying 10 different crops, while ensuring an adequate trade-off between the number of multiply-accumulate operations (MACs) and accuracy. Furthermore, given its ability to effectively classify patches from two image sequences, this methodology may be appealing for other real-world applications, such as the classification of urban materials.

Comparison of current deep convolutional neural networks for the segmentation of breast masses in mammograms. Anaya-Isaza, A.; Mera-Jiménez, L.; Cabrera-Chavarro, J.; Guachi-Guachi, L.; Peluffo-Ordóñez, D.; and Rios-Patiño, J. IEEE Access. 2021.
\n\n\n\n \n \n \"ComparisonWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Comparison of current deep convolutional neural networks for the segmentation of breast masses in mammograms},\n type = {article},\n year = {2021},\n websites = {https://ieeexplore.ieee.org/document/9614200},\n id = {2055ee3b-3c82-35ed-b6e5-b86fee916bcf},\n created = {2022-02-02T05:07:52.251Z},\n file_attached = {false},\n profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},\n group_id = {13ef799e-a79c-38bd-a5cc-187330c349ae},\n last_modified = {2022-02-02T05:07:52.251Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {9614200},\n source_type = {article},\n private_publication = {false},\n abstract = {Breast cancer causes approximately 684,996 deaths worldwide, making it the leading cause of female cancer mortality. However, these figures can be reduced with early diagnosis through mammographic imaging, allowing for the timely and effective treatment of this disease. To establish the best tools for contributing to the automatic diagnosis of breast cancer, different deep learning (DL) architectures were compared in terms of breast lesion segmentation, lesion type classification, and degree of suspicion of malignancy tests. The tasks were completed with state-of-the-art architectures and backbones. Initially, during segmentation, the base UNet, Visual Geometry Group 19 (VGG19), InceptionResNetV2, EfficientNet, MobileNetv2, ResNet, ResNeXt, MultiResUNet, linkNet-VGG19, DenseNet, SEResNet and SeResNeXt architectures were compared, where “Res” denotes a residual network. In addition, training was performed with 5 of the most advanced loss functions and validated by the Dice coefficient, sensitivity, and specificity. The proposed models achieved Dice values above 90%, with the EfficientNet architecture achieving 94.75% and 99% accuracy on the two tasks. Subsequently, classification was addressed with the ResNet50V2, VGG19, InceptionResNetV2, DenseNet121, InceptionV3, Xception and EfficientNetB7 networks. The proposed models achieved 96.97% and 97.73% accuracy through the VGG19 and ResNet50V2 networks on the lesion classification and degree of suspicion tasks, respectively. All three tasks were addressed with open-access databases, including the Digital Database for Screening Mammography (DDSM), the Mammographic Image Analysis Society (MIAS) database, and INbreast.},\n bibtype = {article},\n author = {Anaya-Isaza, Andrés and Mera-Jiménez, Leonel and Cabrera-Chavarro, Johan and Guachi-Guachi, Lorena and Peluffo-Ordóñez, Diego and Rios-Patiño, Jorge},\n doi = {10.1109/ACCESS.2021.3127862},\n journal = {IEEE Access}\n}
\n
\n\n\n

Breast cancer causes approximately 684,996 deaths worldwide, making it the leading cause of female cancer mortality. However, these figures can be reduced with early diagnosis through mammographic imaging, allowing for the timely and effective treatment of this disease. To establish the best tools for contributing to the automatic diagnosis of breast cancer, different deep learning (DL) architectures were compared in terms of breast lesion segmentation, lesion type classification, and degree of suspicion of malignancy tests. The tasks were completed with state-of-the-art architectures and backbones. Initially, during segmentation, the base UNet, Visual Geometry Group 19 (VGG19), InceptionResNetV2, EfficientNet, MobileNetv2, ResNet, ResNeXt, MultiResUNet, linkNet-VGG19, DenseNet, SEResNet and SeResNeXt architectures were compared, where “Res” denotes a residual network. In addition, training was performed with 5 of the most advanced loss functions and validated by the Dice coefficient, sensitivity, and specificity. The proposed models achieved Dice values above 90%, with the EfficientNet architecture achieving 94.75% and 99% accuracy on the two tasks. Subsequently, classification was addressed with the ResNet50V2, VGG19, InceptionResNetV2, DenseNet121, InceptionV3, Xception and EfficientNetB7 networks. The proposed models achieved 96.97% and 97.73% accuracy through the VGG19 and ResNet50V2 networks on the lesion classification and degree of suspicion tasks, respectively. All three tasks were addressed with open-access databases, including the Digital Database for Screening Mammography (DDSM), the Mammographic Image Analysis Society (MIAS) database, and INbreast.

2019 (3)

Drowsiness Detection in Drivers Through Real-Time Image Processing of the Human Eye. Herrera-Granda, E. P.; Caraguay-Procel, J. A.; Granda-Gudiño, P. D.; Herrera-Granda, I. D.; Lorente-Leyva, L. L.; Peluffo-Ordóñez, D. H.; and Revelo-Fuelagán, J. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 2019.
\n\n\n\n \n \n \"DrowsinessWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Drowsiness Detection in Drivers Through Real-Time Image Processing of the Human Eye},\n type = {inproceedings},\n year = {2019},\n keywords = {Alarm,Artificial intelligence,Drowsiness detection,Human eye,Image processing},\n websites = {https://link.springer.com/chapter/10.1007/978-3-030-14799-0_54},\n id = {cec5d659-15b9-338c-ade1-46e47f4770a4},\n created = {2022-02-02T05:07:52.660Z},\n file_attached = {false},\n profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},\n group_id = {13ef799e-a79c-38bd-a5cc-187330c349ae},\n last_modified = {2022-02-02T05:07:52.660Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {Herrera-Granda2019},\n private_publication = {false},\n abstract = {At a global level, drowsiness is one of the main causes of road accidents causing frequent deaths and economic losses. To solve this problem an application developed in Matlab environment was made, which processes real time acquired images in order to determine if the driver is awake or drowsy. Using AdaBoost training Algorithm for Viola-Jones eyes detection, a cascade classifier finds the location and the area of the driver eyes in each frame of the video. Once the driver eyes are detected, they are analyzed whether are open or closed by color segmentation and thresholding based on the sclera binarized area. Finally, it was implemented as a drowsiness detection system which aims to prevent driver fall asleep while driving a vehicle by activating an audible alert, reaching speeds up to 14.5 fps.},\n bibtype = {inproceedings},\n author = {Herrera-Granda, Erick P. and Caraguay-Procel, Jorge A. and Granda-Gudiño, Pedro D. and Herrera-Granda, Israel D. and Lorente-Leyva, Leandro L. and Peluffo-Ordóñez, Diego H. and Revelo-Fuelagán, Javier},\n doi = {10.1007/978-3-030-14799-0_54},\n booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}\n}
\n
\n\n\n

At a global level, drowsiness is one of the main causes of road accidents, causing frequent deaths and economic losses. To solve this problem, an application was developed in the Matlab environment that processes images acquired in real time to determine whether the driver is awake or drowsy. Using the AdaBoost training algorithm for Viola-Jones eye detection, a cascade classifier finds the location and area of the driver's eyes in each frame of the video. Once the driver's eyes are detected, whether they are open or closed is determined by color segmentation and thresholding based on the binarized sclera area. Finally, a drowsiness detection system was implemented that aims to prevent the driver from falling asleep while driving a vehicle by activating an audible alert, reaching speeds of up to 14.5 fps.
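
The eye-detection and sclera-thresholding steps described above follow a standard OpenCV cascade pattern. A minimal sketch follows; the cascade file and the threshold value of 150 are assumptions, not the paper's exact parameters.

# Minimal sketch of cascade-based eye detection plus sclera binarization,
# in the spirit of the pipeline above; the cascade choice and the fixed
# threshold are assumptions, not the paper's exact parameters.
import cv2

eye_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_eye.xml")

def eye_open_ratios(frame_bgr):
    """Return the white-pixel ratio inside each detected eye region."""
    gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)
    ratios = []
    for (x, y, w, h) in eye_cascade.detectMultiScale(gray, 1.3, 5):
        eye = gray[y:y + h, x:x + w]
        _, binary = cv2.threshold(eye, 150, 255, cv2.THRESH_BINARY)
        ratios.append(cv2.countNonZero(binary) / float(w * h))
    return ratios  # near-zero ratios suggest closed eyes

cap = cv2.VideoCapture(0)             # webcam stand-in for the driver camera
ok, frame = cap.read()
if ok:
    print(eye_open_ratios(frame))
cap.release()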

Multivariate Approach to Alcohol Detection in Drivers by Sensors and Artificial Vision. Rosero-Montalvo, P. D.; López-Batista, V. F.; Peluffo-Ordóñez, D. H.; Erazo-Chamorro, V. C.; and Arciniega-Rocha, R. P. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pages 234-243. 2019.
\n\n\n\n \n \n \"LectureWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2019},\n keywords = {Alcohol detection,Drunk detection,Prototype selection,Sensors,Supervised classification},\n pages = {234-243},\n websites = {http://link.springer.com/10.1007/978-3-030-19651-6_23},\n id = {542403c5-86fa-3dd1-9f89-1d793e1f7c63},\n created = {2022-02-02T05:07:53.045Z},\n file_attached = {false},\n profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},\n group_id = {13ef799e-a79c-38bd-a5cc-187330c349ae},\n last_modified = {2022-02-02T05:07:53.045Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {Rosero-Montalvo2019a},\n private_publication = {false},\n abstract = {This work presents a system for detecting excess alcohol in drivers to reduce road traffic accidents. To do so, criteria such as alcohol concentration the environment, a facial temperature of the driver and width of the pupil are considered. To measure the corresponding variables, the data acquisition procedure uses sensors and artificial vision. Subsequently, data analysis is performed into stages for prototype selection and supervised classification algorithms. Accordingly, the acquired data can be stored and processed in a system with low-computational resources. As a remarkable result, the amount of training samples is significantly reduced, while an admissible classification performance is achieved - reaching then suitable settings regarding the given device’s conditions.},\n bibtype = {inbook},\n author = {Rosero-Montalvo, Paul D. and López-Batista, Vivian F. and Peluffo-Ordóñez, Diego H. and Erazo-Chamorro, Vanessa C. and Arciniega-Rocha, Ricardo P.},\n doi = {10.1007/978-3-030-19651-6_23},\n chapter = {Multivariate Approach to Alcohol Detection in Drivers by Sensors and Artificial Vision},\n title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}\n}
\n
\n\n\n

This work presents a system for detecting excess alcohol in drivers to reduce road traffic accidents. To do so, criteria such as the alcohol concentration in the environment, the facial temperature of the driver, and the width of the pupil are considered. To measure the corresponding variables, the data acquisition procedure uses sensors and artificial vision. Subsequently, data analysis is performed in stages using prototype selection and supervised classification algorithms. Accordingly, the acquired data can be stored and processed in a system with low computational resources. As a remarkable result, the amount of training samples is significantly reduced while an admissible classification performance is achieved, reaching suitable settings for the given device's conditions.

Satellite-image-based crop identification using unsupervised machine learning techniques: Preliminary results. Moreno Revelo, M. Y.; Gómez Mendoza, J. B.; and Peluffo Ordoñez, D. H. RISTI - Revista Iberica de Sistemas e Tecnologias de Informacao. 2019.
\n\n\n\n \n \n \"Satellite-image-basedWebsite\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Satellite-image-based crop identification using unsupervised machine learning techniques: Preliminary results},\n type = {article},\n year = {2019},\n keywords = {Landsat satellite,Max-min algorithm,Parzen’s probability density function,Satellite image},\n websites = {https://search.proquest.com/openview/07a5294795bdf4c5423a32a23b32a228},\n id = {d0a1ecdd-30e1-3a3e-9726-4bded79fdc09},\n created = {2022-02-02T05:07:53.590Z},\n file_attached = {false},\n profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},\n group_id = {13ef799e-a79c-38bd-a5cc-187330c349ae},\n last_modified = {2022-02-02T05:07:53.590Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {Revelo2019},\n private_publication = {false},\n abstract = {Artificial vision and image processing have been widely used in the field of scientific research related to satellite landscapes with purposes, like soil classification, detection of changes in urban and rural areas, among others. The existing prototypes have reported meaningful results, notwithstanding, the implementation of a system more properly fitting the nature of the images by taking into account factors such as lighting control, noise reduction and presence of clouds is still an open and of-great-interest problem. This paper presents an initial satellite image processing methodology for clustering crops. The proposed methodology is as follows: Firstly, data pre-processing is carried out, followed by a feature extraction stage. Secondly, image clustering is performed by means of a probabilistic algorithm. This methodology is validated with the Campo Verde database built over crops from a Brazil’s area. Our approach reaches a classification percentage 87.97%, sensitivity 87.1%, specificity 97.22 and f1_score 71.78 %.},\n bibtype = {article},\n author = {Revelo, Mónica Yolanda Moreno and Gómez Menoza, Juan Bernardo and Peluffo Ordoñez, Diego Hernán},\n journal = {RISTI - Revista Iberica de Sistemas e Tecnologias de Informacao}\n}
\n
\n\n\n

Artificial vision and image processing have been widely used in scientific research related to satellite landscapes, for purposes such as soil classification and the detection of changes in urban and rural areas, among others. Existing prototypes have reported meaningful results; notwithstanding, the implementation of a system that more properly fits the nature of the images by taking into account factors such as lighting control, noise reduction, and the presence of clouds is still an open problem of great interest. This paper presents an initial satellite image processing methodology for clustering crops. The proposed methodology is as follows: first, data pre-processing is carried out, followed by a feature extraction stage; second, image clustering is performed by means of a probabilistic algorithm. This methodology is validated with the Campo Verde database, built over crops from an area of Brazil. Our approach reaches a classification percentage of 87.97%, sensitivity of 87.1%, specificity of 97.22%, and F1 score of 71.78%.

2018 (3)

Fingertips Segmentation of Thermal Images and Its Potential Use in Hand Thermoregulation Analysis. Castro-Ospina, A. E.; Correa-Mira, A. M.; Herrera-Granda, I. D.; Peluffo-Ordóñez, D. H.; and Fandiño-Toro, H. A. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pages 455-463. 2018.
\n\n\n\n \n \n \"LectureWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2018},\n keywords = {Fingertip segmentation,NPR measurement,Thermal hand images,Thermorregulation},\n pages = {455-463},\n websites = {http://link.springer.com/10.1007/978-3-319-92639-1_38},\n id = {8064220d-7c13-3618-b32a-dc3a0730abde},\n created = {2022-02-02T05:07:53.964Z},\n file_attached = {false},\n profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},\n group_id = {13ef799e-a79c-38bd-a5cc-187330c349ae},\n last_modified = {2022-02-02T05:07:53.964Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {Castro-Ospina2018},\n private_publication = {false},\n abstract = {Thermoregulation refers to the physiological processes that maintain stable the body temperatures. Infrared thermography is a non-invasive technique useful for visualizing these temperatures. Previous works suggest it is important to analyze thermoregulation in peripheral regions, such as the fingertips, because some disabling pathologies affect particularly the thermoregulation of these regions. This work proposes an algorithm for fingertip segmentation in thermal images of the hand. By using a supervised index, the results are compared against segmentations provided by humans. The results are outstanding even when the analyzed images are highly resized.},\n bibtype = {inbook},\n author = {Castro-Ospina, A. E. and Correa-Mira, A. M. and Herrera-Granda, I. D. and Peluffo-Ordóñez, D. H. and Fandiño-Toro, H. A.},\n doi = {10.1007/978-3-319-92639-1_38},\n chapter = {Fingertips Segmentation of Thermal Images and Its Potential Use in Hand Thermoregulation Analysis},\n title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}\n}
\n
\n\n\n

Thermoregulation refers to the physiological processes that keep body temperatures stable. Infrared thermography is a non-invasive technique useful for visualizing these temperatures. Previous works suggest it is important to analyze thermoregulation in peripheral regions, such as the fingertips, because some disabling pathologies particularly affect the thermoregulation of these regions. This work proposes an algorithm for fingertip segmentation in thermal images of the hand. Using a supervised index, the results are compared against segmentations provided by humans. The results are outstanding even when the analyzed images are highly resized.

Computer Vision-Based Method for Automatic Detection of Crop Rows in Potato Fields. García-Santillán, I.; Peluffo-Ordoñez, D.; Caranqui, V.; Pusdá, M.; Garrido, F.; and Granda, P. In Advances in Intelligent Systems and Computing, pages 355-366. 2018.
\n\n\n\n \n \n \"AdvancesWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2018},\n keywords = {Autonomous guidance,Computer vision,Crop row detection,Image segmentation},\n pages = {355-366},\n websites = {http://link.springer.com/10.1007/978-3-319-73450-7_34},\n id = {f855034a-cec0-31ba-8c78-4ccfb37e3f1a},\n created = {2022-02-02T05:07:54.319Z},\n file_attached = {false},\n profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},\n group_id = {13ef799e-a79c-38bd-a5cc-187330c349ae},\n last_modified = {2022-02-02T05:07:54.319Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {Garcia-Santillan2018},\n private_publication = {false},\n abstract = {This work presents an adaptation and validation of a method for automatic crop row detection from images captured in potato fields (Solanum tuberosum) for initial growth stages based on the micro-ROI concept. The crop row detection is a crucial aspect for autonomous guidance of agricultural vehicles and site-specific treatments application. The images were obtained using a color camera installed in the front of a tractor under perspective projection. There are some issues that can affect the quality of the images and the detection procedure, among them: uncontrolled illumination in outdoor agricultural environments, different plant densities, presence of weeds and gaps in the crop rows. The adapted approach was designed to address these adverse situations and it consists of three linked phases. The main contribution is the ability to detect straight and curved crop rows in potato crops. The performance was quantitatively compared against two existing methods, achieving acceptable results in terms of accuracy and processing time.},\n bibtype = {inbook},\n author = {García-Santillán, Iván and Peluffo-Ordoñez, Diego and Caranqui, Víctor and Pusdá, Marco and Garrido, Fernando and Granda, Pedro},\n doi = {10.1007/978-3-319-73450-7_34},\n chapter = {Computer Vision-Based Method for Automatic Detection of Crop Rows in Potato Fields},\n title = {Advances in Intelligent Systems and Computing}\n}
\n
\n\n\n

This work presents an adaptation and validation of a method for automatic crop row detection from images captured in potato fields (Solanum tuberosum) at initial growth stages, based on the micro-ROI concept. Crop row detection is a crucial aspect of autonomous guidance of agricultural vehicles and the application of site-specific treatments. The images were obtained using a color camera installed at the front of a tractor under perspective projection. Several issues can affect the quality of the images and the detection procedure, among them: uncontrolled illumination in outdoor agricultural environments, different plant densities, presence of weeds, and gaps in the crop rows. The adapted approach was designed to address these adverse situations and consists of three linked phases. The main contribution is the ability to detect straight and curved crop rows in potato crops. The performance was quantitatively compared against two existing methods, achieving acceptable results in terms of accuracy and processing time.

Advances in Homotopy Applied to Object Deformation. Salazar-Castro, J. A.; Umaquinga-Criollo, A. C.; Cruz-Cruz, L. D.; Alpala-Alpala, L. O.; González-Castaño, C.; Becerra-Botero, M. A.; Peluffo-Ordóñez, D. H.; and Castellanos-Domínguez, C. G. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pages 231-242. 2018.
\n\n\n\n \n \n \"LectureWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2018},\n keywords = {Homotopy,Object deformation,Smooth transitions,Transcendental functions},\n pages = {231-242},\n websites = {http://link.springer.com/10.1007/978-3-319-78759-6_22},\n id = {84c182d8-5a89-3d44-8f48-4a49477e40ef},\n created = {2022-02-02T05:07:54.833Z},\n file_attached = {false},\n profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},\n group_id = {13ef799e-a79c-38bd-a5cc-187330c349ae},\n last_modified = {2022-02-02T05:07:54.833Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {Salazar-Castro2018b},\n private_publication = {false},\n abstract = {This work explores novel alternatives to conventional linear homotopy to enhance the quality of resulting transitions from object deformation applications. Studied/introduced approaches extend the linear mapping to other representations that provides smooth transitions when deforming objects while homotopy conditions are fulfilled. Such homotopy approaches are based on transcendental functions (TFH) in both simple and parametric versions. As well, we propose a variant of an existing quality indicator based on the ratio between the coefficients curve of resultant homotopy and that of a less-realistic, reference homotopy. Experimental results depict the effect of proposed TFH approaches regarding its usability and benefit for interpolating images formed by homotopic objects with smooth changes.},\n bibtype = {inbook},\n author = {Salazar-Castro, Jose Alejandro and Umaquinga-Criollo, Ana Cristina and Cruz-Cruz, Lilian Dayana and Alpala-Alpala, Luis Omar and González-Castaño, Catalina and Becerra-Botero, Miguel A. and Peluffo-Ordóñez, Diego Hernán and Castellanos-Domínguez, Cesar Germán},\n doi = {10.1007/978-3-319-78759-6_22},\n chapter = {Advances in Homotopy Applied to Object Deformation},\n title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}\n}
\n
\n\n\n

This work explores novel alternatives to conventional linear homotopy to enhance the quality of the resulting transitions in object deformation applications. The studied/introduced approaches extend the linear mapping to other representations that provide smooth transitions when deforming objects while homotopy conditions are fulfilled. Such homotopy approaches are based on transcendental functions (TFH), in both simple and parametric versions. As well, we propose a variant of an existing quality indicator based on the ratio between the coefficient curve of the resultant homotopy and that of a less realistic reference homotopy. Experimental results depict the effect of the proposed TFH approaches regarding their usability and benefit for interpolating images formed by homotopic objects with smooth changes.
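
For reference, the conventional linear homotopy that the chapter generalizes, together with one transcendental reparametrization of the kind it describes (the specific sine choice is an illustrative assumption, not necessarily the authors' exact function), is

H(x,t) = (1-t)\,f(x) + t\,g(x), \quad t \in [0,1],
\qquad
H_s(x,t) = H\!\left(x,\ \sin^2\!\tfrac{\pi t}{2}\right),

which preserves the endpoint conditions H_s(x,0) = f(x) and H_s(x,1) = g(x) while easing the deformation in and out.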