<script src="https://bibbase.org/service/mendeley/53302730-7b0b-3598-84bc-0c824c8d5a8d?authorFirst=1&theme=dividers&jsonp=1"></script>
<?php
$contents = file_get_contents("https://bibbase.org/service/mendeley/53302730-7b0b-3598-84bc-0c824c8d5a8d?authorFirst=1&theme=dividers");
print_r($contents);
?>
<iframe src="https://bibbase.org/service/mendeley/53302730-7b0b-3598-84bc-0c824c8d5a8d?authorFirst=1&theme=dividers"></iframe>
For more details see the documentation.
To the site owner:
Action required! Mendeley is changing its API. In order to keep using Mendeley with BibBase past April 14th, you need to:
@article{herreragranda2023hybrid,
  title    = {A hybrid genetic algorithm for optimizing urban distribution of auto-parts by a vertex routing problem},
  author   = {Herrera-Granda, Israel D. and Martín-Barreiro, Carlos and Herrera-Granda, Erick P. and Peluffo-Ordóñez, Diego H.},
  journal  = {Revista Investigacion Operacional},
  year     = {2023},
  keywords = {distribution of auto parts, genetic algorithms, heuristic, urban logistics, vrp},
  url      = {https://rev-inv-ope.pantheonsorbonne.fr/sites/default/files/inline-files/44123-12_author.pdf},
  abstract = {In the present work, a hybrid algorithm is designed and implemented, by combining genetic-algorithm meta-heuristics, and the nearest neighbor heuristics, aimed at solving the capacited vehicle routing problem with time windows (CVRPTW). The proposed implementation optimizes the distribution for an auto-parts trading company, within the urban perimeter of the city of Quito-Ecuador. By design, a script coded in C\# language is developed. Besides, in order to evaluate the quality of the solutions generated by the proposed hybrid algorithm, different instances of the problem are built, by taking small samples from the whole customers’ information. For comparison purposes, the mathematical model of the CVRPTW available in the GAMS optimization software is considered. As well, real instances of the problem are considered, on which the generated routes reaches an improvement up to 20\% of the distances traveled by the routes traditionally used in by the case-study company.},
}
@article{Gonzalez-Castano2021,
  title    = {An {MPPT} Strategy Based on a Surface-Based Polynomial Fitting for Solar Photovoltaic Systems Using Real-Time Hardware},
  author   = {González-Castaño, Catalina and Lorente-Leyva, Leandro L. and Muñoz, Javier and Restrepo, Carlos and Peluffo-Ordóñez, Diego H.},
  journal  = {Electronics},
  year     = {2021},
  month    = jan,
  volume   = {10},
  number   = {2},
  pages    = {206},
  doi      = {10.3390/electronics10020206},
  url      = {https://www.mdpi.com/2079-9292/10/2/206},
  abstract = {This paper presents an optimal design of a surface-based polynomial fitting for tracking the maximum power point (MPPT) of a photovoltaic (PV) system, here named surface-based polynomial fitting (MPPT-SPF). The procedure of the proposed MPPT-SPF strategy is based on a polynomial model to characterize data from the PV module with a global fit. The advantage of using polynomials is that they provide a good fit within a predefined data range even though they can diverge greatly from that range. The MPPT-SPF strategy is integrated with a DC-DC boost converter to verify its performance and its interaction with different control loops. Therefore, the MPPT strategy is applied to the reference outer PI control loop, which in turn provides the current reference to the inner current loop based on a discrete-time sliding current control. A real-time and high-speed simulator (PLECS RT Box 1) and a digital signal controller (DSC) are used to implement the hardware-in-the-loop system to obtain the results. The proposed strategy does not have a high computational cost and can be implemented in a commercial low-cost DSC (TI 28069M). The proposed MPPT strategy is compared with a conventional perturb and observe method to prove its effectiveness under demanding tests.},
}
@article{Fernandez2021,
  title    = {Overview on kernels for least-squares support-vector-machine-based clustering: explaining kernel spectral clustering},
  author   = {Fernández, Y. and Marrufo, I. and Paez, M. A. and Umaquinga-Criollo, A. C. and Rosero, P. D. and Peluffo-Ordóñez, H. D.},
  journal  = {Revista Investigacion Operacional},
  year     = {2021},
  keywords = {clustering, kernel principal component, kernel spectral clustering ksc, support vector machine, svm},
  url      = {https://rev-inv-ope.pantheonsorbonne.fr/sites/default/files/inline-files/42121-10.pdf},
  abstract = {This letter presents an overview on some remarkable basics on kernels as well as the formulation of a clustering approach based on least-squares support vector machines. Specifically, the method known as kernel spectral clustering (KSC) is of interest. We explore the links between KSC and a weighted version of kernel principal component analysis (WKPCA). Also, we study the solution of the KSC problem by means of a primal-dual scheme. All mathematical developments are carried out following an entirely matrix formulation. As a result, in addition to the elegant KSC formulation, important insights and hints about the use and design of kernel-based approaches for clustering are provided.},
}
@article{Rosero-Montalvo2021,
  title    = {Air Pollution Monitoring Using {WSN} Nodes with Machine Learning Techniques: A Case Study},
  author   = {Rosero-Montalvo, Paul D. and López-Batista, Vivian F. and Arciniega-Rocha, Ricardo and Peluffo-Ordóñez, Diego H.},
  journal  = {Logic Journal of the {IGPL}},
  year     = {2021},
  month    = feb,
  doi      = {10.1093/jigpal/jzab005},
  url      = {https://academic.oup.com/jigpal/advance-article/doi/10.1093/jigpal/jzab005/6133990},
  abstract = {Air pollution is a current concern of people and government entities. Therefore, in urban scenarios, its monitoring and subsequent analysis is a remarkable and challenging issue due mainly to the variability of polluting-related factors. For this reason, the present work shows the development of a wireless sensor network that, through machine learning techniques, can be classified into three different types of environments: high pollution levels, medium pollution and no noticeable contamination into the Ibarra City. To achieve this goal, signal smoothing stages, prototype selection, feature analysis and a comparison of classification algorithms are performed. As relevant results, there is a classification performance of 95\% with a significant noisy data reduction.},
}
@inproceedings{10.1007/978-981-33-4909-4_23,
  title     = {A Brief Review on Instance Selection Based on Condensed Nearest Neighbors for Data Classification Tasks},
  author    = {Fernández-Fernández, Yasmany and Peluffo-Ordóñez, Diego H. and Umaquinga-Criollo, Ana C. and Lorente-Leyva, Leandro L. and Cabrera-Alvarez, Elia N.},
  editor    = {Bindhu, V. and Tavares, João Manuel R. S. and Boulogeorgos, Alexandros-Apostolos A. and Vuppalapati, Chandrasekar},
  booktitle = {International Conference on Communication, Computing and Electronics Systems},
  publisher = {Springer Singapore},
  address   = {Singapore},
  year      = {2021},
  pages     = {313--324},
  url       = {https://link.springer.com/chapter/10.1007/978-981-33-4909-4_23},
  abstract  = {The condensed nearest neighbor (CNN) classifier is one of the techniques used and known to perform recognition tasks. It has also proven to be one of the most interesting algorithms in the field of data mining despite its simplicity. However, CNN suffers from several drawbacks, such as high storage requirements and low noise tolerance. One of the characteristics of CNN is that it focuses on the selection of prototypes, which consists of reducing the set of training data. One of the goals of CNN seeks to achieve the reduction of information in such a way that the reduced information can represent large amounts of data to exercise decision-making on them. This paper mentions some of the most recent contributions to CNN-based unsupervised algorithms in a review that builds on the mathematical principles of condensed methods.},
}
@inproceedings{10.1007/978-981-33-4909-4_6,
  title     = {A Dynamic Programming Approach for Power Curtailment Decision Making on {PV} Systems},
  author    = {Fernández-Fernández, Yasmany and Lorente-Leyva, Leandro L. and Peluffo-Ordóñez, Diego H. and Cabrera Álvarez, Elia N.},
  editor    = {Bindhu, V. and Tavares, João Manuel R. S. and Boulogeorgos, Alexandros-Apostolos A. and Vuppalapati, Chandrasekar},
  booktitle = {International Conference on Communication, Computing and Electronics Systems},
  publisher = {Springer Singapore},
  address   = {Singapore},
  year      = {2021},
  pages     = {77--86},
  url       = {https://link.springer.com/chapter/10.1007/978-981-33-4909-4_6},
  abstract  = {The new grid codes for large-scale photovoltaic power plants require power curtailment despite the variation of solar irradiance. This power curtailment is been developed considering one reference of active power. However, this value is chosen according to the demand, but it is not considering other aspects as solar irradiance or cloudiness. Therefore, this article presents a novel approach to tackle this issue. For this, stochastic dynamic programming is considered to optimize the decision of the power reference every hour considering the solar irradiance and cloudiness during different stages of the day. The results obtained are compared with the performance of the photovoltaic power plant, and it is a referential approach that uses the maximum power point tracking algorithms for the construction of referential power intervals over longer time intervals.},
}
@inproceedings{10.1007/978-981-33-4909-4_5,
  title     = {An Interactive Framework to Compare Multi-criteria Optimization Algorithms: Preliminary Results on {NSGA-II} and {MOPSO}},
  author    = {Dorado-Sevilla, David F. and Peluffo-Ordóñez, Diego H. and Lorente-Leyva, Leandro L. and Herrera-Granda, Erick P. and Herrera-Granda, Israel D.},
  editor    = {Bindhu, V. and Tavares, João Manuel R. S. and Boulogeorgos, Alexandros-Apostolos A. and Vuppalapati, Chandrasekar},
  booktitle = {International Conference on Communication, Computing and Electronics Systems},
  publisher = {Springer Singapore},
  address   = {Singapore},
  year      = {2021},
  pages     = {61--76},
  url       = {https://link.springer.com/chapter/10.1007/978-981-33-4909-4_5},
  abstract  = {A problem of multi-criteria optimization, according to its approach, can mean either minimizing or maximizing a group of at least two objective functions to find the best possible set of solutions. There are several methods of multi-criteria optimization, in which the resulting solutions' quality varies depending on the method used and the complexity of the posed problem. A bibliographical review allowed us to notice that the methods derived from the evolutionary computation deliver good results and are commonly used in research works. Although comparative studies among these optimization methods have been found, the conclusions that these offer to the reader do not allow us to define a general rule that determines when one method is better than another. Therefore, the choice of a well-adapted optimization method can be a difficult task for non-experts in the field. To implement a graphical interface that allows non-expert users in multi-objective optimization is proposed to interact and compare the performance of the NSGA-II and MOPSO algorithms. It is chosen qualitatively from a group of five preselected algorithms as members of evolutionary algorithms and swarm intelligence. Therefore, a comparison methodology has been proposed to allow the user for analyzing the graphical and numerical results, which will observe the behavior of algorithms and determine the best suited one according to their needs.},
}
@article{feliciano2021evaluation,
  title     = {Evaluation of Working Temperature in Wind Turbine Bearings by Simulation of Lubricant Level},
  author    = {Feliciano, Yorley Arbella and Varela, Carlos A. Trinchet and Guativas, Javier A. Vargas and Lorente-Leyva, Leandro L. and Peluffo-Ordóñez, Diego H.},
  journal   = {International Journal of Design \& Nature and Ecodynamics},
  publisher = {IIETA},
  year      = {2021},
  volume    = {16},
  number    = {1},
  pages     = {99--104},
  keywords  = {bearing, cfd, heat, lubricant, simulation, wind turbines, working temperature},
  url       = {http://www.iieta.org/journals/ijdne/paper/10.18280/ijdne.160113},
  abstract  = {After studies related to the technical state and behavior of the temperatures manifested by the bearings of the generators that make up the Goldwind wind turbines models S50-750, installed in the Gibara II Wind Farm (PE). It was identified and validated as a tool that enables early diagnosis of anomalous bearing behaviors, SolidWorks (SW) computer-aided design and engineering software (CAD-CAE) and the Flow Simulation add-on. Since it allows studies based on the computational fluid dynamics (CFD), of the temperatures that are manifested in the lubricant during the different working regimes of the generator. The studies carried out evaluated the environmental conditions of exploitation in Cuba. It works on obtaining and predicting the values of the thermal state using the principles and methods for the calculation of heat transfer, the forecast statistics apply. The research supports its theories and postulates in a sample of 6 installed equipment, from China, for it had a historical database that collects temperature measurements in different working conditions which allowed to establish correlation between theoretical predictions and real behaviors.},
}
@inproceedings{10.1007/978-981-33-4355-9_44,
  title     = {Analysis of Business Behavior in the Australian Market Under an Approach of Statistical Techniques and Economic Dimensions for Sustainable Business: A Case Study},
  author    = {Patiño-Alarcón, Delio R. and Patiño-Alarcón, Fernando A. and Lorente-Leyva, Leandro L. and Peluffo-Ordóñez, Diego H.},
  booktitle = {Proceedings of International Conference on Sustainable Expert Systems},
  publisher = {Springer Singapore},
  address   = {Singapore},
  year      = {2021},
  pages     = {595--605},
  url       = {https://link.springer.com/chapter/10.1007/978-981-33-4355-9_44},
  abstract  = {This paper provides a current and future business analysis of small food services and products company. Analysis methods include tabulation of the dataset, as well as hypothesis testing by comparison between directly proportional variables such as price/quality and recommendations/customer loyalty. Other calculations include key economic dimensions, decisions influenced by the JobKeeper payment scheme, data on capital expenditure expectations, and future business conditions. The results of the analyzed data show that customer and company behavior is in parallel with global trade. In particular, the growth of the digital market and customer loyalty where they find a product that meets their quality needs. The analysis finds that the company's prospects in its current position are positive. Of the four variables identified, two will be reinforced by the economic and market strategies to be implemented. The work also investigates the fact that the analysis carried out has possible limitations. Some of the limitations are that not all the company was able to register in the JobKeeper payment scheme and the lack of use of key tools for sustainable marketing.},
}
@article{Vargas-Muñoz2021470,
  title    = {Design of a low computational cost prototype for cardiac arrhythmia detection [Diseño de un prototipo de bajo coste computacional para detección de arritmias cardiacas]},
  author   = {Vargas-Muñoz, A. M. and Chamorro-Sangoquiza, D. C. and Umaquinga-Criollo, A. C. and Rosero-Montalvo, P. D. and Becerra, M. A. and Peluffo-Ordóñez, D. H. and Revelo-Fuelagán, E. J.},
  journal  = {RISTI - Revista Iberica de Sistemas e Tecnologias de Informacao},
  year     = {2021},
  volume   = {2021},
  number   = {E40},
  pages    = {470--479},
  url      = {https://search.proquest.com/openview/d9dffd8a726c99f54a47adaf372e13b8/1?pq-origsite=gscholar&cbl=1006393},
  note     = {Cited by 0},
  abstract = {This research presents the design of a prototype for the detection of cardiac arrhythmias that incorporates an embedded low-cost computational system in an environment of limited computational resources capable of analyzing characteristics of the QRS complexes. To do this, a strategy for classifying normal and pathological heart beats is developed in long-term electrocardiographic recordings (Holter), which are representative waves of the beat and their analysis allows identifying ventricular arrhythmias. For the development of this initial prototype, it is found that the use of the k nearest neighbors (k-NN) algorithm together with a stage of selection of variables from the training set is a good alternative and represents an important contribution of this work to experimental level. The experiments were carried out on the basis of cardiac arrhythmia data from the Massachusetts Institute of Technology (MIT). The results are satisfactory and promising. © 2021, Associacao Iberica de Sistemas e Tecnologias de Informacao. All rights reserved.},
}
@inproceedings{leyva2021demandforecasting,
  title     = {Demand Forecasting for Textile Products Using Statistical Analysis and Machine Learning Algorithms},
  author    = {Lorente-Leyva, Leandro L. and Alemany, M. M. E. and Peluffo-Ordóñez, Diego H. and Araujo, Roberth A.},
  editor    = {Nguyen, Ngoc Thanh and Chittayasothorn, Suphamit and Niyato, Dusit and Trawiński, Bogdan},
  booktitle = {Intelligent Information and Database Systems},
  publisher = {Springer International Publishing},
  address   = {Cham},
  year      = {2021},
  pages     = {181--194},
  url       = {https://link.springer.com/chapter/10.1007/978-3-030-73280-6_15},
  abstract  = {The generation of an accurate forecast model to estimate the future demand for textile products that favor decision-making around an organization's key processes is very important. The minimization of the model's uncertainty allows the generation of reliable results, which prevent the textile industry's economic commitment and improve the strategies adopted around production planning and decision making. That is why this work is focused on the demand forecasting for textile products through the application of artificial neural networks, from a statistical analysis of the time series and disaggregation in different time horizons through temporal hierarchies, to develop a more accurate forecast. With the results achieved, a comparison is made with statistical methods and machine learning algorithms, providing an environment where there is an adequate development of demand forecasting, improving accuracy and performance. Where all the variables that affect the productive environment of this sector under study are considered. Finally, as a result of the analysis, multilayer perceptron achieved better performance compared to conventional and machine learning algorithms. Featuring the best behavior and accuracy in demand forecasting of the analyzed textile products.},
}
@inproceedings{fernandez2021unsupervisedbarter,
  title     = {Unsupervised Barter Model Based on Natural Human Interaction},
  author    = {Fernández-Fernández, Yasmany and Lorente-Leyva, Leandro L. and Peluffo-Ordóñez, Diego H. and Miranda Pérez, Ridelio and Cabrera Álvarez, Elia N.},
  editor    = {Hong, Tzung-Pei and Wojtkiewicz, Krystian and Chawuthai, Rathachai and Sitek, Pawel},
  booktitle = {Recent Challenges in Intelligent Information and Database Systems},
  publisher = {Springer Singapore},
  address   = {Singapore},
  year      = {2021},
  pages     = {387--400},
  url       = {https://link.springer.com/chapter/10.1007/978-981-16-1685-3_32},
  abstract  = {Human interaction is a natural process in business management. In various indigenous cultures, the natives still use a barter system to reach consensus or balances that determine the essence of their economies. The present investigation consists of the presentation of an unsupervised model based on pure barter. The main contribution sought is to visualize the balance that is achieved in an unsupervised environment of two entities that are close to reaching an agreement. Both Game Theory and Walrasian Theory deal with the problem of exchange of goods. However, the current objective is to show the barter model from its simplest bases for the construction of an unsupervised automatic learning scheme where a system of pairs of agents represent a basic model for decision making when guaranteeing an agreement.},
}
@article{morenorevelo2021enhanced,
  title    = {Enhanced Convolutional-Neural-Network Architecture for Crop Classification},
  author   = {Moreno-Revelo, Mónica Y. and Guachi-Guachi, Lorena and Gómez-Mendoza, Juan Bernardo and Revelo-Fuelagán, Javier and Peluffo-Ordóñez, Diego H.},
  journal  = {Applied Sciences},
  year     = {2021},
  pages    = {1--23},
  url      = {https://www.mdpi.com/2076-3417/11/9/4292},
  abstract = {Automatic crop identification and monitoring is a key element in enhancing food production processes as well as diminishing the related environmental impact. Although several efficient deep learning techniques have emerged in the field of multispectral imagery analysis, the crop classification problem still needs more accurate solutions. This work introduces a competitive methodology for crop classification from multispectral satellite imagery mainly using an enhanced 2D convolutional neural network (2D-CNN) designed at a smaller-scale architecture, as well as a novel post-processing step. The proposed methodology contains four steps: image stacking, patch extraction, classification model design (based on a 2D-CNN architecture), and post-processing. First, the images are stacked to increase the number of features. Second, the input images are split into patches and fed into the 2D-CNN model. Then, the 2D-CNN model is constructed within a small-scale framework, and properly trained to recognize 10 different types of crops. Finally, a post-processing step is performed in order to reduce the classification error caused by lower-spatial-resolution images. Experiments were carried over the so-named Campo Verde database, which consists of a set of satellite images captured by Landsat and Sentinel satellites from the municipality of Campo Verde, Brazil. In contrast to the maximum accuracy values reached by remarkable works reported in the literature (amounting to an overall accuracy of about 81\%, a f1 score of 75.89\%, and average accuracy of 73.35\%), the proposed methodology achieves a competitive overall accuracy of 81.20\%, a f1 score of 75.89\%, and an average accuracy of 88.72\% when classifying 10 different crops, while ensuring an adequate trade-off between the number of multiply-accumulate operations (MACs) and accuracy. Furthermore, given its ability to effectively classify patches from two image sequences, this methodology may result appealing for other real-world applications, such as the classification of urban materials.},
}
@article{becerra2021information,
  title    = {Information Quality Assessment in Fusion Systems},
  author   = {Becerra, M. A. and Tobón, C. and Castro-Ospina, A. E. and Peluffo-Ordóñez, D. H.},
  journal  = {Data},
  year     = {2021},
  pages    = {1--30},
  keywords = {completeness, information quality, multi-source fusion, relevance, reliability, uncertainty},
  url      = {https://www.mdpi.com/2306-5729/6/6/60},
  abstract = {This paper provides a comprehensive description of the current literature on data fusion, with an emphasis on Information Quality (IQ) and performance evaluation. This literature review highlights recent studies that reveal existing gaps, the need to find a synergy between data fusion and IQ, several research issues, and the challenges and pitfalls in this field. First, the main models, frameworks, architectures, algorithms, solutions, problems, and requirements are analyzed. Second, a general data fusion engineering process is presented to show how complex it is to design a framework for a specific application. Third, an IQ approach, as well as the different methodologies and frameworks used to assess IQ in information systems are addressed; in addition, data fusion systems are presented along with their related criteria. Furthermore, information on the context in data fusion systems and its IQ assessment are discussed. Subsequently, the issue of data fusion systems’ performance is reviewed. Finally, some key aspects and concluding remarks are outlined, and some future lines of work are gathered.},
}
@article{roseromontalvo2021addressing,
  title    = {Addressing the Data Acquisition Paradigm in the Early Detection of Pediatric Foot Deformities},
  author   = {Rosero-Montalvo, Paul D. and Fuentes-Hernández, Edison A. and Morocho-Cayamcela, Manuel E.},
  journal  = {Sensors},
  year     = {2021},
  pages    = {1--17},
  keywords = {children, data analysis, embedded systems, machine learning, plantar pressure},
  url      = {https://www.mdpi.com/1424-8220/21/13/4422},
  abstract = {The analysis of plantar pressure through podometry has allowed analyzing and detecting different types of disorders and treatments in child patients. Early detection of an inadequate distribution of the patient’s weight can prevent serious injuries to the knees and lower spine. In this paper, an embedded system capable of detecting the presence of normal, flat, or arched footprints using resistive pressure sensors was proposed. For this purpose, both hardware- and software-related criteria were studied for an improved data acquisition through signal coupling and filtering processes. Subsequently, learning algorithms allowed us to estimate the type of footprint biomechanics in preschool and school children volunteers. As a result, the proposed algorithm achieved an overall classification accuracy of 97.2\%. A flat feet share of 60\% was encountered in a sample of 1000 preschool children. Similarly, flat feet were observed in 52\% of a sample of 600 school children.},
}
@article{HOSSEINI2021107333,
  author              = {Hosseini, Samira and Peluffo, Diego and Okoye, Kingsley and Nganji, Julius Tanyu},
  title               = {The Impact of Technological Advancements on Educational Innovation ({VSI-tei})},
  journal             = {Computers \& Electrical Engineering},
  year                = {2021},
  volume              = {93},
  pages               = {107333},
  doi                 = {10.1016/j.compeleceng.2021.107333},
  websites            = {https://www.sciencedirect.com/science/article/pii/S0045790621003050},
  keywords            = {Educational innovation,Educational technologies,Future of education,Higher education},
  abstract            = {Advancement of computer technology and electrical engineering have revolutionized our lives specially our day-to-day interactions with the world. In theory, electronics and computer systems have significantly impacted what we call techno-based communications and/or high-performance computing. Computational techniques have found major importance in teaching and learning processes in normal-life settings and under special circumstances, including the current Coronavirus pandemic and subsequent economic crisis. The main contribution of such technologies involves development of mobile apps, online learning platforms, machine learning and artificial intelligent systems, game-based techniques, and e-learning, which play vital role in fostering Educational progress. The aim of this special section is to open a window of opportunity for submission of manuscripts that introduce new technologies, methods, and strategies for advancing the use of computer technology and electrical engineering for enhancing the quality of teaching and learning, and empowering Educational Innovation. This special section welcomes both original research and review articles from a wide spectrum of research, with a focus on the application of these technologies in all areas of Education and Educational Innovation. Manuscripts, or extended versions of papers presented at related conferences, are welcome as well. Submissions within the frame of following topics and related to the impact of COVID-19 on education are welcome.},
  type                = {article},
  bibtype             = {article},
  source_type         = {article},
  id                  = {99a2262b-c57f-3c3b-aa2b-d61f2c1b217a},
  created             = {2021-08-03T17:46:30.768Z},
  file_attached       = {false},
  profile_id          = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified       = {2021-08-03T17:47:52.748Z},
  read                = {false},
  starred             = {false},
  authored            = {true},
  confirmed           = {true},
  hidden              = {false},
  private_publication = {false},
}
@article{peluffoordonez2021integrating,
  author              = {Peluffo-Ordóñez, Diego H.},
  title               = {Integrating Information Visualization and Dimensionality Reduction: A pathway to Bridge the Gap between Natural and Artificial Intelligence},
  journal             = {TecnoLógicas},
  year                = {2021},
  volume              = {24},
  websites            = {https://revistas.itm.edu.co/index.php/tecnologicas/article/view/2108},
  keywords            = {Dimensionality Reduction,Information Visualization},
  abstract            = {By importing some natural abilities from human thinking into the design of computerized decision support systems, a cross-cutting trend of intelligent systems has emerged, namely, the synergetic integration between natural and artificial intelligence [1]. While natural intelligence provides creative, parallel, and holistic thinking, its artificial counterpart is logical, accurate, able to perform complex and extensive calculations, and tireless. In the light of such integration, two concepts are important: controllability and interpretability. The former is defined as the ability of computerized systems to receive feedback and follow users’ instructions, while the latter refers to human-machine communication. A suitable alternative to simultaneously involve these two concepts—and then bridging the gap between natural and artificial intelligence—is bringing together the fields of dimensionality reduction (DimRed) and information visualization (InfoVis).},
  type                = {article},
  bibtype             = {article},
  id                  = {fac6fe23-8c32-309b-8816-d68532170ffd},
  created             = {2021-08-10T02:40:03.006Z},
  file_attached       = {true},
  profile_id          = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified       = {2021-08-10T02:52:55.588Z},
  read                = {false},
  starred             = {false},
  authored            = {true},
  confirmed           = {true},
  hidden              = {false},
  private_publication = {false},
}
@article{becerra2021fusion,
  author              = {Becerra, M. A. and Uribe, Y. and Peluffo-Ordóñez, D. H. and Álvarez-Uribe, Karla and Tobón, C.},
  title               = {Information fusion and information quality assessment for environmental forecasting},
  journal             = {Urban Climate},
  year                = {2021},
  month               = aug,
  volume              = {39},
  doi                 = {10.1016/j.uclim.2021.100960},
  websites            = {https://www.sciencedirect.com/science/article/pii/S2212095521001905},
  abstract            = {Air pollution is a major environmental threat to human health. Therefore, multiple systems have been developed for early prediction of air pollution levels in large cities. However, deterministic models produce uncertainties due to the complexity of the physical and chemical processes of individual systems and transport. In turn, statistical and machine learning techniques require a large amount of historical data to predict the behavior of a variable. In this paper, we propose a data fusion model to spatially and temporally predict air quality and assess its situation and risk for public health. Our model is based on the Joint Directors of Laboratories (JDL) model and focused on Information Quality (IQ), which allows us to fine tune hyper-parameters in different processes and trace information from raw data to knowledge. Expert systems use the information assessment to select and process data, information, and knowledge. The functionality of our model is tested using an environmental database of the Air Quality Monitoring Network of Área Metropolitana del Valle de Aburrá (AMVA in Spanish) in Colombia. Different levels of noise are added to the data to analyze the effects of information quality on the systems' performance throughout the process. Finally, our system is compared with two conventional machine learning-based models: Deep Learning and Support Vector Regression (SVR). The results show that our proposed model exhibits better performance, in terms of air quality forecasting, than conventional models. Furthermore, its capability as a mechanism to support decision making is clearly demonstrated.},
  type                = {article},
  bibtype             = {article},
  id                  = {ded4c0b4-4486-3620-a378-7b436a7d6683},
  created             = {2021-08-25T20:04:24.767Z},
  file_attached       = {false},
  profile_id          = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified       = {2021-10-30T02:50:28.905Z},
  read                = {false},
  starred             = {false},
  authored            = {true},
  confirmed           = {true},
  hidden              = {false},
  private_publication = {false},
}
@inproceedings{10.1007/978-3-030-86973-1_36,
  author              = {Ortega-Bustamante, MacArthur C and Hasperué, Waldo and Peluffo-Ordóñez, Diego H and González-Vergara, Juan and Marín-Gaviño, Josué and Velez-Falconi, Martín},
  title               = {Generalized Spectral Dimensionality Reduction Based on Kernel Representations and Principal Component Analysis},
  booktitle           = {Computational Science and Its Applications -- ICCSA 2021},
  editor              = {Gervasi, Osvaldo and Murgante, Beniamino and Misra, Sanjay and Garau, Chiara and Blečić, Ivan and Taniar, David and Apduhan, Bernady O and Rocha, Ana Maria A C and Tarantino, Eufemia and Torre, Carmelo Maria},
  publisher           = {Springer International Publishing},
  address             = {Cham},
  year                = {2021},
  pages               = {512--523},
  websites            = {https://link.springer.com/chapter/10.1007%2F978-3-030-86973-1_36},
  abstract            = {Very often, multivariate data analysis problems require dimensionality reduction (DR) stages to either improve analysis performance or represent the data in an intelligible fashion. Traditionally DR techniques are developed under different frameworks and settings what makes their comparison a non-trivial task. In this sense, generalized DR approaches are of great interest as they enable both to power and compare the DR techniques in a proper and fair manner. This work introduces a generalized spectral dimensionality reduction (GSDR) approach able to represent DR spectral techniques and enhance their representation ability. To do so, GSDR exploits the use of kernel-based representations as an initial nonlinear transformation to obtain a new space. Then, such a new space is used as an input for a feature extraction process based on principal component analysis. As remarkable experimental results, GSDR shows to be able to outperform the conventional implementation of well-known spectral DR techniques (namely, classical multidimensional scaling and Laplacian eigenmaps) in terms of the scaled version of the average agreement rate. Additionally, relevant insights and theoretical developments to understand the effect of data structure preservation at local and global levels are provided.},
  type                = {inproceedings},
  bibtype             = {inproceedings},
  source_type         = {inproceedings},
  id                  = {6847c8c0-ba15-3514-87e5-04a0b6f07bd6},
  created             = {2021-09-13T23:48:25.291Z},
  file_attached       = {false},
  profile_id          = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified       = {2021-09-13T23:48:25.291Z},
  read                = {false},
  starred             = {false},
  authored            = {true},
  confirmed           = {true},
  hidden              = {false},
  private_publication = {false},
}
@inproceedings{10.1007/978-3-030-86271-8_25,
  author              = {Sánchez-Pozo, Nadia N and Trilles-Oliver, Sergi and Solé-Ribalta, Albert and Lorente-Leyva, Leandro L and Mayorca-Torres, Dagoberto and Peluffo-Ordóñez, Diego H},
  title               = {Algorithms Air Quality Estimation: A Comparative Study of Stochastic and Heuristic Predictive Models},
  booktitle           = {Hybrid Artificial Intelligent Systems},
  editor              = {Sanjurjo González, Hugo and Pastor López, Iker and García Bringas, Pablo and Quintián, Héctor and Corchado, Emilio},
  publisher           = {Springer International Publishing},
  address             = {Cham},
  year                = {2021},
  pages               = {293--304},
  websites            = {https://link.springer.com/chapter/10.1007/978-3-030-86271-8_25},
  abstract            = {This paper presents a comparative analysis of predictive models applied to air quality estimation. Currently, among other global issues, there is a high concern about air pollution, for this reason, there are several air quality indicators, with carbon monoxide (CO), sulfur dioxide (SO2), nitrogen dioxide (NO2) and ozone (O3) being the main ones. When the concentration level of an indicator exceeds an established air quality safety threshold, it is considered harmful to human health, therefore, in cities like London, there are monitoring systems for air pollutants. This study aims to compare the efficiency of stochastic and heuristic predictive models for forecasting ozone (O3) concentration to estimate London's air quality by analyzing an open dataset retrieved from the London Datastore portal. Models based on data analysis have been widely used in air quality forecasting. This paper develops four predictive models (autoregressive integrated moving average - ARIMA, support vector regression - SVR, neural networks (specifically, long-short term memory - LSTM) and Facebook Prophet). Experimentally, ARIMA models and LSTM are proved to reach the highest accuracy in predicting the concentration of air pollutants among the considered models. As a result, the comparative analysis of the loss function (root-mean-square error) reveled that ARIMA and LSTM are the most suitable, accomplishing a low error rate of 0.18 and 0.20, respectively.},
  type                = {inproceedings},
  bibtype             = {inproceedings},
  source_type         = {inproceedings},
  id                  = {e50ece29-2035-37e8-90fa-6ae4cf322fff},
  created             = {2021-09-15T02:10:09.053Z},
  file_attached       = {false},
  profile_id          = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified       = {2021-11-24T10:24:49.390Z},
  read                = {false},
  starred             = {false},
  authored            = {true},
  confirmed           = {true},
  hidden              = {false},
  private_publication = {false},
}
@inproceedings{capelo2021raspberry,
  author              = {Capelo, Jesús and Ruiz, Erick and Asanza, Víctor and Toscano-Quiroga, Tonny and Sánchez-Pozo, Nadia N and Lorente-Leyva, Leandro L and Peluffo-Ordóñez, Diego Hernan},
  title               = {{Raspberry Pi}-based {IoT} for shrimp farms: Real-time remote monitoring with automated system},
  booktitle           = {2021 International Conference on Applied Electronics, AE},
  year                = {2021},
  pages               = {7--10},
  websites            = {https://ieeexplore.ieee.org/document/9542907},
  keywords            = {atmega328p,cyberphysical system,dissolved oxygen,ecuador,salinity,shrimp farming,temperature,xbee},
  abstract            = {This project analyses the optimal parameters for the shrimp farming, trying to help the aquaculture of Ecuador, using a cyberphysical system, which includes temperature, salinity, dissolved oxygen, and pH sensors to monitor the water conditions and an embedded system to control it using an XBee and ATMega328p microcontrollers to remotely activate and deactivate aerators to maintain the quality of each pool in neat conditions.},
  type                = {inproceedings},
  bibtype             = {inproceedings},
  id                  = {daa1d95b-5dfe-335c-a10d-2527820ab4a6},
  created             = {2021-10-01T17:41:04.001Z},
  file_attached       = {false},
  profile_id          = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified       = {2021-10-01T17:41:04.001Z},
  read                = {false},
  starred             = {false},
  authored            = {true},
  confirmed           = {true},
  hidden              = {false},
  private_publication = {false},
}
@inproceedings{maisinchojivaja2021monitoring,
  author              = {Maisincho-Jivaja, Anthony and Alejandro-Sanjines, Ulbio and Asanza, Víctor and Toscano-Quiroga, Tonny and Sánchez-Pozo, Nadia N. and Lorente-Leyva, Leandro L. and Peluffo-Ordóñez, Diego Hernan},
  title               = {Monitoring a turkey hatchery based on a cyber-physical system},
  booktitle           = {2021 International Conference on Applied Electronics, AE},
  year                = {2021},
  websites            = {https://ieeexplore.ieee.org/document/9542899},
  keywords            = {internet of things,meleagriculture,pid tuner toolbox,sensor,system identification toolbox},
  abstract            = {The implementation of a turkey farm brings with it severe environmental problems due to the deficient study of the physical space where the animals are placed. To counteract this situation and improve the quality of life in the hatchery, it is necessary to monitor and control the following variables: Temperature, Humidity, Ammonia Emission and Lux. The solution is based on a cyber-physical system which is composed of a network of sensors, controller and actuator. The sensors will provide information from the physical environment, the controller evaluates these parameters to execute an action to the actuator. Proportional, Integral and Derivative (PID) control defines the setpoint for temperature while Pulse-Width Modulation (PWM) adjusts the light intensity in a spotlight. The End Device executes these actions and its parameters will be sent to ThingSpeak which monitors system behavior through the Internet of Things.},
  type                = {inproceedings},
  bibtype             = {inproceedings},
  id                  = {8da2fa6c-d173-3a99-a3f4-88698bebf25e},
  created             = {2021-10-01T17:59:24.009Z},
  file_attached       = {false},
  profile_id          = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified       = {2021-10-01T17:59:24.009Z},
  read                = {false},
  starred             = {false},
  authored            = {true},
  confirmed           = {true},
  hidden              = {false},
  private_publication = {false},
}
@inproceedings{10.1007/978-3-030-89654-6_1,
  author              = {Orellana, Cristian and Tobar, Martín and Yazán, Jeremy and Peluffo-Ordóñez, D and Guachi-Guachi, Lorena},
  title               = {A Chatterbot Based on Genetic Algorithm: Preliminary Results},
  booktitle           = {Applied Informatics},
  editor              = {Florez, Hector and Pollo-Cattaneo, Ma Florencia},
  publisher           = {Springer International Publishing},
  address             = {Cham},
  year                = {2021},
  pages               = {3--12},
  websites            = {https://link.springer.com/chapter/10.1007/978-3-030-89654-6_1},
  abstract            = {Chatterbots are programs that simulate an intelligent conversation with people. They are commonly used in customer service, product suggestions, e-commerce, travel and vacations, queries, and complaints. Although some works have presented valuable studies by using several technologies including evolutionary computing, artificial intelligence, machine learning, and natural language processing, creating chatterbots with a low rate of grammatical errors and good user satisfaction is still a challenging task. Therefore, this work introduces a preliminary study for the development of a GA-based chatterbot that generates intelligent dialogues with a low rate of grammatical errors and a strong sense of responsiveness, so boosting the personal satisfaction of individuals who interact with it. Preliminary results show that the proposed GA-based chatterbot yields 69\% of ``Good'' responses for typical conversations regarding orders and receipts in a cafeteria.},
  type                = {inproceedings},
  bibtype             = {inproceedings},
  source_type         = {inproceedings},
  id                  = {c46f82d9-d283-3c71-9c2c-39011b64f152},
  created             = {2021-10-23T00:00:47.585Z},
  file_attached       = {false},
  profile_id          = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified       = {2021-10-23T00:23:22.379Z},
  read                = {false},
  starred             = {false},
  authored            = {true},
  confirmed           = {true},
  hidden              = {false},
  private_publication = {false},
}
@inproceedings{10.1007/978-3-030-89654-6_5,
  author              = {López-Albán, D and López-Barrera, A and Mayorca-Torres, D and Peluffo-Ordóñez, D},
  title               = {Sign Language Recognition Using {Leap Motion} Based on Time-Frequency Characterization and Conventional Machine Learning Techniques},
  booktitle           = {Applied Informatics},
  editor              = {Florez, Hector and Pollo-Cattaneo, Ma Florencia},
  publisher           = {Springer International Publishing},
  address             = {Cham},
  year                = {2021},
  pages               = {55--67},
  websites            = {https://link.springer.com/chapter/10.1007/978-3-030-89654-6_5},
  abstract            = {Sign language is the form of communication between the deaf and hearing population, which uses the gesture-spatial configuration of the hands as a communication channel with their social environment. This work proposes the development of a gesture recognition method associated with sign language from the processing of time series from the spatial position of hand reference points granted by a Leap Motion optical sensor. A methodology applied to a validated American Sign Language (ASL) Dataset which involves the following sections: (i) preprocessing for filtering null frames, (ii) segmentation of relevant information, (iii) time-frequency characterization from the Discrete Wavelet Transform (DWT). Subsequently, the classification is carried out with Machine Learning algorithms (iv). It is graded by a 97.96\% rating yield using the proposed methodology with the Fast Tree algorithm.},
  type                = {inproceedings},
  bibtype             = {inproceedings},
  source_type         = {inproceedings},
  id                  = {4d865abe-82a0-3499-ad11-0ca85986f8ff},
  created             = {2021-10-23T00:23:22.196Z},
  file_attached       = {false},
  profile_id          = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified       = {2021-10-23T00:25:06.432Z},
  read                = {false},
  starred             = {false},
  authored            = {true},
  confirmed           = {true},
  hidden              = {false},
  private_publication = {false},
}
@article{math9212732,
  author              = {González-Castaño, Catalina and Restrepo, Carlos and Revelo-Fuelagán, Javier and Lorente-Leyva, Leandro L and Peluffo-Ordóñez, Diego H},
  title               = {A Fast-Tracking Hybrid {MPPT} Based on Surface-Based Polynomial Fitting and {P\&O} Methods for Solar {PV} under Partial Shaded Conditions},
  journal             = {Mathematics},
  year                = {2021},
  volume              = {9},
  number              = {21},
  doi                 = {10.3390/math9212732},
  websites            = {https://www.mdpi.com/2227-7390/9/21/2732},
  abstract            = {The efficiency of photovoltaic (PV) systems depends directly on solar irradiation, so drastic variations in solar exposure will undoubtedly move its maximum power point (MPP). Furthermore, the presence of partial shading conditions (PSCs) generates local maximum power points (LMPPs) and one global maximum power point (GMPP) in the P-V characteristic curve. Therefore, a proper maximum power point tracking (MPPT) technique is crucial to increase PV system efficiency. There are classical, intelligent, optimal, and hybrid MPPT techniques; this paper presents a novel hybrid MPPT technique that combines Surface-Based Polynomial Fitting (SPF) and Perturbation and Observation (P\&O) for solar PV generation under PSCs. The development of the experimental PV system has two stages: (i) Modeling the PV array with the DC-DC boost converter using a real-time and high-speed simulator (PLECS RT Box), (ii) and implementing the proposed GMPPT algorithm with the double-loop controller of the DC-DC boost converter in a commercial low-priced digital signal controller (DSC). According to the simulation and the experimental results, the suggested hybrid algorithm is effective at tracking the GMPP under both uniform and nonuniform irradiance conditions in six scenarios: (i) system start-up, (ii) uniform irradiance variations, (iii) sharp change of the (PSCs), (iv) multiple peaks in the P-V characteristic, (v) dark cloud passing, and (vi) light cloud passing. Finally, the experimental results—through the standard errors and the mean power tracked and tracking factor scores—proved that the proposed hybrid SPF-P\&O MPPT technique reaches the convergence to GMPP faster than benchmark approaches when dealing with PSCs.},
  type                = {article},
  bibtype             = {article},
  source_type         = {article},
  id                  = {8cbef8e1-d9bf-38fb-a030-2691a4fffec4},
  created             = {2021-10-28T19:12:14.352Z},
  file_attached       = {false},
  profile_id          = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified       = {2021-10-28T19:12:14.352Z},
  read                = {false},
  starred             = {false},
  authored            = {true},
  confirmed           = {true},
  hidden              = {false},
  private_publication = {false},
}
@article{CONSTANTINE2021364,
  author              = {Constantine, Alisson and Asanza, Víctor and Loayza, Francis R and Peláez, Enrique and Peluffo-Ordóñez, Diego},
  title               = {{BCI} System using a Novel Processing Technique Based on Electrodes Selection for Hand Prosthesis Control},
  journal             = {IFAC-PapersOnLine},
  year                = {2021},
  volume              = {54},
  number              = {15},
  pages               = {364--369},
  doi                 = {10.1016/j.ifacol.2021.10.283},
  websites            = {https://www.sciencedirect.com/science/article/pii/S2405896321016876},
  keywords            = {Bio-signals analysis,Brain Computer Interface,Embedded Systems,FPGA,Neural Networks},
  notes               = {11th IFAC Symposium on Biological and Medical Systems BMS 2021},
  abstract            = {This work proposes an end-to-end model architecture, from feature extraction to classification using an Artificial Neural Network. The feature extraction process starts from an initial set of signals acquired by electrodes of a Brain-Computer Interface (BCI). The proposed architecture includes the design and implementation of a functional six Degree-of-Freedom (DOF) prosthetic hand. A Field Programmable Gate Array (FPGA) translates electroencephalography (EEG) signals into movements in the prosthesis. We also propose a new technique for selecting and grouping electrodes, which is related to the motor intentions of the subject. We analyzed and predicted two imaginary motor-intention tasks: opening and closing both fists and flexing and extending both feet. The model implemented with the proposed architecture showed an accuracy of 93.7% and a classification time of 8.8 µs for the FPGA. These results present the feasibility to carry out BCI using machine learning techniques implemented in a FPGA card.},
  type                = {article},
  bibtype             = {article},
  source_type         = {article},
  id                  = {96dd79a9-f345-36a5-970f-bfdfa8ab3c19},
  created             = {2021-11-03T19:21:30.701Z},
  file_attached       = {false},
  profile_id          = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified       = {2021-11-03T19:21:30.701Z},
  read                = {false},
  starred             = {false},
  authored            = {true},
  confirmed           = {true},
  hidden              = {false},
  private_publication = {false},
}
@article{ASANZA2021376,
  author              = {Asanza, Víctor and Sánchez-Pozo, Nadia N and Lorente-Leyva, Leandro L and Peluffo-Ordóñez, Diego Hernan and Loayza, Francis R and Peláez, Enrique},
  title               = {Classification of Subjects with {Parkinson}’s Disease using Finger Tapping Dataset},
  journal             = {IFAC-PapersOnLine},
  year                = {2021},
  volume              = {54},
  number              = {15},
  pages               = {376--381},
  doi                 = {10.1016/j.ifacol.2021.10.285},
  websites            = {https://www.sciencedirect.com/science/article/pii/S2405896321016906},
  keywords            = {Classification,Finger Tapping,Machine Learning,Parkinson’s disease},
  notes               = {11th IFAC Symposium on Biological and Medical Systems BMS 2021},
  abstract            = {Parkinson’s disease is the second most common neurodegenerative disorder and affects more than 7 million people globally. In this work, we classify subjects with Parkinson’s disease using data from finger-tapping on a keyboard. We use a free database by Physionet with more than 9 million records, preprocessed to delete atypical data. In the feature extraction stage, we obtained 48 features. We use Google Colaboratory to train, validate, and test nine supervised learning algorithms that detect the disease. As a result, we achieve a degree of accuracy higher than 98%.},
  type                = {article},
  bibtype             = {article},
  source_type         = {article},
  id                  = {81983f44-0b79-394f-abd7-142f12e2683a},
  created             = {2021-11-03T19:26:44.224Z},
  file_attached       = {false},
  profile_id          = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified       = {2021-11-03T19:26:44.224Z},
  read                = {false},
  starred             = {false},
  authored            = {true},
  confirmed           = {true},
  hidden              = {false},
  private_publication = {false},
}
@article{9614200,
  author              = {Anaya-Isaza, Andrés and Mera-Jiménez, Leonel and Cabrera-Chavarro, Johan and Guachi-Guachi, Lorena and Peluffo-Ordóñez, Diego and Rios-Patiño, Jorge},
  title               = {Comparison of current deep convolutional neural networks for the segmentation of breast masses in mammograms},
  journal             = {IEEE Access},
  year                = {2021},
  doi                 = {10.1109/ACCESS.2021.3127862},
  websites            = {https://ieeexplore.ieee.org/document/9614200},
  abstract            = {Breast cancer causes approximately 684,996 deaths worldwide, making it the leading cause of female cancer mortality. However, these figures can be reduced with early diagnosis through mammographic imaging, allowing for the timely and effective treatment of this disease. To establish the best tools for contributing to the automatic diagnosis of breast cancer, different deep learning (DL) architectures were compared in terms of breast lesion segmentation, lesion type classification, and degree of suspicion of malignancy tests. The tasks were completed with state-of-the-art architectures and backbones. Initially, during segmentation, the base UNet, Visual Geometry Group 19 (VGG19), InceptionResNetV2, EfficientNet, MobileNetv2, ResNet, ResNeXt, MultiResUNet, linkNet-VGG19, DenseNet, SEResNet and SeResNeXt architectures were compared, where “Res” denotes a residual network. In addition, training was performed with 5 of the most advanced loss functions and validated by the Dice coefficient, sensitivity, and specificity. The proposed models achieved Dice values above 90%, with the EfficientNet architecture achieving 94.75% and 99% accuracy on the two tasks. Subsequently, classification was addressed with the ResNet50V2, VGG19, InceptionResNetV2, DenseNet121, InceptionV3, Xception and EfficientNetB7 networks. The proposed models achieved 96.97% and 97.73% accuracy through the VGG19 and ResNet50V2 networks on the lesion classification and degree of suspicion tasks, respectively. All three tasks were addressed with open-access databases, including the Digital Database for Screening Mammography (DDSM), the Mammographic Image Analysis Society (MIAS) database, and INbreast.},
  type                = {article},
  bibtype             = {article},
  source_type         = {article},
  id                  = {342ba596-7804-32c9-a6ac-e92e6e033af6},
  created             = {2021-11-14T02:18:51.797Z},
  file_attached       = {false},
  profile_id          = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified       = {2021-11-14T02:18:51.797Z},
  read                = {false},
  starred             = {false},
  authored            = {true},
  confirmed           = {true},
  hidden              = {false},
  private_publication = {false},
}
@inproceedings{10.1007/978-3-030-91608-4_57,
  author              = {Umaquinga-Criollo, Ana C and Tamayo-Quintero, Juan D and Moreno-García, María N and Aalaila, Yahya and Peluffo-Ordóñez, Diego H},
  title               = {Developments on Support Vector Machines for Multiple-Expert Learning},
  booktitle           = {Intelligent Data Engineering and Automated Learning -- IDEAL 2021},
  publisher           = {Springer International Publishing},
  address             = {Cham},
  year                = {2021},
  pages               = {587--598},
  websites            = {https://link.springer.com/chapter/10.1007/978-3-030-91608-4_57},
  abstract            = {In supervised learning scenarios, some applications require solving a classification problem wherein labels are not given as a single ground truth. Instead, the criteria of a set of experts is used to provide labels aimed at compensating for the erroneous influence with respect to a single labeler as well as the error bias (excellent or lousy) due to the level of perception and experience of each expert. This paper aims to briefly outline mathematical developments on support vector machines (SVM), and overview SVM-based approaches for multiple expert learning (MEL). Such MEL approaches are posed by modifying the formulation of a least-squares SVM, which enables to obtain a set of reliable, objective labels while penalizing the evaluation quality of each expert. Particularly, this work studies both two-class (binary) MEL classifier (BMLC) and its extension to multiclass through one-against all (OaA-MLC) including penalization of each expert's influence. Formal mathematical developments are stated, as well as remarkable discussion on key aspects about the least-squares SVM formulation and penalty factors are provided.},
  type                = {inproceedings},
  bibtype             = {inproceedings},
  source_type         = {inproceedings},
  id                  = {7b464997-cc10-3abb-8b0b-db79ee53bbcc},
  created             = {2021-11-24T10:24:49.160Z},
  file_attached       = {false},
  profile_id          = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified       = {2021-11-24T10:24:49.160Z},
  read                = {false},
  starred             = {false},
  authored            = {true},
  confirmed           = {true},
  hidden              = {false},
  private_publication = {false},
}
@article{membranes11120953,
  author              = {González-Castaño, Catalina and Lorente-Leyva, Leandro L and Alpala, Janeth and Revelo-Fuelagán, Javier and Peluffo-Ordóñez, Diego H and Restrepo, Carlos},
  title               = {Dynamic Modeling of a Proton-Exchange Membrane Fuel Cell Using a {Gaussian} Approach},
  journal             = {Membranes},
  year                = {2021},
  volume              = {11},
  number              = {12},
  doi                 = {10.3390/membranes11120953},
  websites            = {https://www.mdpi.com/2077-0375/11/12/953},
  abstract            = {This paper proposes a Gaussian approach for the proton-exchange membrane fuel cell (PEMFC) model that estimates its voltage behavior from the operating current value. A multi-parametric Gaussian model and an unconstrained optimization formulation based on a conventional non-linear least squares optimizer is mainly considered. The model is tested using experimental data from the Ballard Nexa 1.2 kW fuel cell (FC). This methodology offers a promising approach for static and current-voltage, characteristic of the three regions of operation. A statistical study is developed to evaluate the effectiveness and superiority of the proposed FC Gaussian model compared with the Diffusive Global model and the Evolution Strategy. In addition, an approximation to the exponential function for a Gaussian model simplification can be used in systems that require real-time emulators or complex long-time simulations.},
  type                = {article},
  bibtype             = {article},
  source_type         = {article},
  id                  = {809bb213-345e-36a5-b26d-18189f36cd24},
  created             = {2021-12-02T14:49:02.204Z},
  file_attached       = {false},
  profile_id          = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified       = {2021-12-02T14:49:02.204Z},
  read                = {false},
  starred             = {false},
  authored            = {true},
  confirmed           = {true},
  hidden              = {false},
  private_publication = {false},
}
@inproceedings{gonzalezcastano2021implementation,
  author              = {González-Castaño, Catalina and Madrid, Emerson and Naranjo, Walter and Restrepo, Carlos and Revelo-Fuelagán, Javier and Peluffo-Ordoñez, Diego H},
  title               = {Implementation of a two-loop digital control for high voltage {DC-DC} buck-boost converter with coupled inductor},
  booktitle           = {2021 IEEE 22nd Workshop on Control and Modelling of Power Electronics (COMPEL)},
  year                = {2021},
  pages               = {1--6},
  doi                 = {10.1109/COMPEL52922.2021.9645983},
  websites            = {https://ieeexplore.ieee.org/document/9645983},
  type                = {inproceedings},
  bibtype             = {inproceedings},
  source_type         = {INPROCEEDINGS},
  id                  = {b9acb544-7654-36c2-a85b-5ad7a6f8eeec},
  created             = {2022-01-26T02:50:05.066Z},
  file_attached       = {false},
  profile_id          = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified       = {2022-02-04T07:12:16.452Z},
  read                = {false},
  starred             = {false},
  authored            = {true},
  confirmed           = {true},
  hidden              = {false},
  private_publication = {false},
}
@article{Bastidas2020,
  author   = {Bastidas, David and Piñeros, Camilo and Peluffo-Ordóñez, Diego H. and Sierra, Luz Marina and Becerra, Miguel A. and Umaquinga-Criollo, Ana C.},
  title    = {Analytic Study on the Performance of Multi-Classification Approaches in Case-Based Reasoning Systems: Medical Data Exploration},
  journal  = {RISTI - Revista Iberica de Sistemas e Tecnologias de Informacao},
  year     = {2020},
  keywords = {Case based reasoning,Classifiers fusion,Dermatology disease,Heart disease},
  url      = {https://search.proquest.com/docview/2451420129/fulltextPDF/F4AF5E590BD14D5EPQ/9},
  abstract = {This paper compares the main combinations of classifiers (Sequential, Parallel and Stacking) over two remarkable medical data collections: Cleveland and Dermatology. The principal rationale underlying the use of multiple classifiers is that together the methods may be powered rather than their individual behavior. Such a premise is validated through the identification of the best combination reaching the lowest error rate within a case-based reasoning system (CBR). The different combinations are essentially formed by five different classifiers greatly different regarding their nature and inception: SVM (Support Vector Machines), Parzen, Random Forest, K-NN (k-nearest neighbors) and Naive Bayes. From experimental results, it can be inferred that the combination of techniques is greatly useful. Also, in this work, some key aspects and hints are discussed about the relationship between the nature of the input data and the classification (either individual or mixture of classifiers) stage building within a CBR framework.},
  internal-note = {missing volume/number/pages -- complete from the RISTI issue when available},
}
@article{Chamorro-Sangoquiza2020,
  author   = {Chamorro-Sangoquiza, Diana C. and Vargas-Muñoz, Andrés M. and Umaquinga-Criollo, Ana C. and Becerra, Miguel A. and Peluffo-Ordóñez, Diego H.},
  title    = {Comparative Study of Data Mining Techniques to Reveal Patterns of Academic Performance in Secondary Education},
  journal  = {RISTI - Revista Iberica de Sistemas e Tecnologias de Informacao},
  year     = {2020},
  keywords = {Academic performance patterns,Classifiers,Feature selection,Matlab,Multiple classifier},
  url      = {https://search.proquest.com/docview/2452331372/fulltextPDF/64A2741CD0B646EAPQ/1},
  abstract = {The data mining techniques allow for unveiling knowledge from large volumes of information, which have recently been explored in information analysis by educational institutions but already with an increasing demand for this sector to support decision-making. In this research, a methodology for comparing data mining techniques is proposed, which is to be applied to the analysis of academic patterns in students of media education. Multiple methods of selecting attributes are applied to reduce the dimensionality and compare three classifiers and multi-classifiers. The experiments are carried out in a dataset of 285 instances and 36 attributes obtained from an educational survey applied to the students of the School of Education of the University of Barcelona 2017-2018. The best results of classification achieved by the multi-splitter Boosted Tree and Bagged Tree with 93.24% accuracy using the data selected using the BestFirst algorithm.},
  internal-note = {"academic patrons" corrected to "academic patterns" (matches the title); "students of media education" likely means secondary education (Spanish "educación media") -- confirm against the published abstract},
}
@article{Mayorca-Torres2020,
  author   = {Mayorca-Torres, Dagoberto and Caicedo-Eraso, Julio C. and Peluffo-Ordóñez, Diego H.},
  title    = {Knee Joint Angle Measuring Portable Embedded System Based on Inertial Measurement Units for Gait Analysis},
  journal  = {International Journal on Advanced Science, Engineering and Information Technology},
  year     = {2020},
  volume   = {10},
  number   = {2},
  doi      = {10.18517/ijaseit.10.2.10814},
  url      = {http://ijaseit.insightsociety.org/index.php?option=com_content&view=article&id=9&Itemid=1&article_id=10814},
  keywords = {Gait analysis,IMU,Kalman filter,Knee-joint angle,Motion analysis},
  abstract = {Inside clinical research, gait analysis is a fundamental part of the functional evaluation of the human body's movement. Its evaluation has been carried out through different methods and tools, which allow early diagnosis of diseases, and monitoring and assessing the effectiveness of therapeutic plans applied to patients for rehabilitation. The observational method is one of the most used in specialized centers in Colombia; however, to avoid any possible errors associated with the subjectivity observation, technological tools that provide quantitative data can support this method. This paper deals with the methodological process for developing a computational tool and hardware device for the analysis of gait, specifically on articular kinematics of the knee. This work develops a prototype based on the fusion of inertial measurement units (IMU) data as an alternative for the attenuation of errors associated with each of these technologies. A videogrammetry technique measured the same human gait patterns to validate the proposed system, in terms of accuracy and repeatability of the recorded data. Results showed that the developed prototype successfully captured the knee-joint angles of the flexion-extension motions with high consistency and accuracy with the measurements obtained from the videogrammetry technique. Statistical analysis (ICC and RMSE) exhibited a high correlation between the two systems for the measures of the joint angles. These results suggest the possibility of using an IMU-based prototype in realistic scenarios for accurately tracking a patient's knee-joint kinematics during a human gait.},
  internal-note = {volume/number inferred from the DOI suffix (10.2) -- confirm against the journal issue},
}
@article{Riofrio2020,
  author   = {Riofrío, Juan and Chang, Oscar and Revelo-Fuelagán, E. J. and Peluffo-Ordóñez, Diego H.},
  title    = {Forecasting the Consumer Price Index ({CPI}) of {Ecuador}: A Comparative Study of Predictive Models},
  journal  = {International Journal on Advanced Science, Engineering and Information Technology},
  year     = {2020},
  volume   = {10},
  number   = {3},
  doi      = {10.18517/ijaseit.10.3.10813},
  url      = {http://ijaseit.insightsociety.org/index.php?option=com_content&view=article&id=9&Itemid=1&article_id=10813},
  keywords = {Consumer Price Index (CPI),Ecuador,Forecasting,Predictive models},
  abstract = {The Consumer Price Index (CPI) is one of the most important economic indicators for countries' characterization and is typically considered an official measure of inflation. The CPI considers the monthly price variation of a determined set of goods and services in a specific region, and it is key in the economic and social planning of a given country, hence the great importance of CPI forecasting. In this paper, we outline a comparative study of state-of-the-art predictive models over an Ecuadorian CPI dataset with 174 monthly registers, from 2005 to 2019. This small available dataset makes forecasting a challenging time-series-prediction task. Another difficulty is the last years' trend variation, which since mid-2016, has changed from an upward average of 3.5 points to a stable trend of ±0.8 points. This paper explores the performance of relevant predictive models when tackling the Ecuadorian CPI forecasting problem accurately for the next 12 months. For this, a comparative study considering a variety of predictive models is carried out, including the Neural networks approach using a Sequential Model with Long Short-Term Memory layers machine learning using Support Vector Regression, as well as classical approaches like SARIMA and Exponential Smoothing. We also consider big corporations tools like Facebook Prophet. As a result, the paper presents the best predictive models, and parameters found, along with Ecuador's CPI forecasting for the next 12 months (part of 2020). This information could be used for decision-making in several important topics related to social and economic activities.},
  internal-note = {volume/number inferred from the DOI suffix (10.3); "Revelo-Fuelagán, E. J." kept as exported though sibling entries spell the given name "Javier" -- confirm},
}
@incollection{Umaquinga-Criollo2020,
  author    = {Umaquinga-Criollo, A. C. and Tamayo-Quintero, J. D. and Moreno-García, M. N. and Riascos, J. A. and Peluffo-Ordóñez, D. H.},
  title     = {Multi-expert Methods Evaluation on Financial and Economic Data: Introducing Bag of Experts},
  booktitle = {Lecture Notes in Computer Science},
  year      = {2020},
  doi       = {10.1007/978-3-030-61705-9_36},
  url       = {https://link.springer.com/chapter/10.1007/978-3-030-61705-9_36},
  abstract  = {The use of machine learning into economics scenarios results appealing since it allows for automatically testing economic models and predict consumer/client behavior to support decision-making processes. The finance market typically uses a set of expert labelers or Bureau credit scores given by governmental or private agencies such as Experian, Equifax, and Creditinfo, among others. This work focuses on introducing a so-named Bag of Expert (BoE): a novel approach for creating multi-expert Learning (MEL) frameworks aimed to emulate real experts labeling (human-given labels) using neural networks. The MEL systems “learn” to perform decision-making tasks by considering a uniform number of labels per sample or individuals along with respective descriptive variables. The BoE is created similarly to Generative Adversarial Network (GANs), but rather than using noise or perturbation by a generator, we trained a feed-forward neural network to randomize sampling data, and either add or decrease hidden neurons. Additionally, this paper aims to investigate the performance on economics-related datasets of several state-of-the-art MEL methods, such as GPC, GPC-PLAT, KAAR, MA-LFC, MA-DGRL, and MA-MAE. To do so, we develop an experimental framework composed of four tests: the first one using novice experts; the second with proficient experts; the third is a mix of novices, intermediate and proficient experts, and the last one uses crowd-sourcing. Our BoE method presents promising results and can be suitable as an alternative to properly assess the reliability of both MEL methods and conventional labeler generators (i.e., virtual expert labelers).},
  internal-note = {Mendeley export put the chapter title in "chapter" and the series name in "title"; swapped into standard fields. "Lecture Notes in Computer Science" is really the series -- replace booktitle with the actual LNCS volume title and add series/volume/publisher},
}
@article{Bravo2020,
  author   = {Erazo Bravo, Yoiner and Rosero Narváez, Edison and Castro Cabrera, Paola and Londoño Bonilla, John and Peluffo-Ordóñez, Diego},
  title    = {Evaluation of Characterization Techniques for Classification of Seismic-Volcanic Signals of the {Nevado del Ruiz}},
  journal  = {RISTI - Revista Iberica de Sistemas e Tecnologias de Informacao},
  year     = {2020},
  keywords = {Cepstral,Characterization,Classification,Machine Learning,Seismic-volcanic},
  url      = {https://search.proquest.com/docview/2350120798},
  abstract = {Currently, researches have been carried out on automatic classification of seismic-volcanic events-mainly based on machine learning techniques-aimed at identifying the nature of the recorded event. In this sense, several approaches have been introduced. Nonetheless, due to these signals’ variability, there is no still a conclusive method of characterization, and it is in fact an open and challenging research problem. In this work, a methodology for comparing features extraction techniques is developed aimed at the discrimination of seismic events of volcanic origin. Representation of the signals in the domain of time, frequency, time-frequency and Cepstral is used. The set of attributes is optimized by selecting characteristics by assigning weights. A supervised classification is executed using known records. Finally, classification performance measures were obtained to determine the subset of characteristics that best represent and discriminate the signals.},
  internal-note = {export listed Spanish compound surnames first-name-first (e.g. "Ordoñez, Diego Peluffo"); reordered to "Surname(s), Given" and normalized "Peluffo-Ordóñez" to match sibling entries -- verify author list against the published paper},
}
@article{Rosero-Montalvo2020b,
  author   = {Rosero-Montalvo, Paul D. and López-Batista, Vivian F. and Riascos, Jaime A. and Peluffo-Ordóñez, Diego H.},
  title    = {Intelligent {WSN} System for Water Quality Analysis Using Machine Learning Algorithms: A Case Study ({Tahuando} River from {Ecuador})},
  journal  = {Remote Sensing},
  year     = {2020},
  volume   = {12},
  number   = {12},
  pages    = {1988},
  doi      = {10.3390/rs12121988},
  url      = {https://www.mdpi.com/2072-4292/12/12/1988},
  keywords = {Prototype selection,River pollution,Supervised classification,WSN},
  abstract = {This work presents a wireless sensor network (WSN) system able to determine the water quality of rivers. Particularly, we consider the Tahuando River from Ibarra, Ecuador, as a case study. The main goal of this research is to determine the river's status throughout its route, by generating data reports into an interactive user interface. To this end, we use an array of sensors collecting several measures such as: turbidity, temperature, water quality, pH, and temperature. Subsequently, from the information collected on an Internet-of-Things (IoT) server, we develop a data analysis scheme with both data representation and supervised classification. As an important result, our system outputs a map that shows the contamination levels of the river at different regions. Furthermore, in terms of data analysis performance, the proposed system reduces the data matrix by 97% from its original size, while it reaches a classification performance over 90%. Furthermore, as an additional remarkable result, we here introduce the so-called quantitative metric of balance (QMB), which measures the balance or ratio between performance and power consumption.},
  internal-note = {volume/number/article number taken from the MDPI URL path (12/12/1988)},
}
@article{Feliciano2020,
  author   = {Feliciano, Yorley Arbella and Trinchet, Carlos A. and Meléndez, Ernesto and Lorente-Leyva, Leandro L. and Peluffo-Ordóñez, Diego H.},
  title    = {Analysis of the Thermal Behavior in the {Goldwind} {S50/750} Wind Turbines Installed in the Wind Farm {Gibara II} Using {CAD-CAE} Tools},
  journal  = {International Journal of Mechanical and Production Engineering Research and Development},
  year     = {2020},
  doi      = {10.24247/ijmperdapr202032},
  url      = {http://www.tjprc.org/view_paper.php?id=12471},
  keywords = {Goldwind Wind Turbines,Temperatures & CAD/CAE Tools,Thermal Behavior,Wind Farm},
  abstract = {This study indicates the thermal behavior inside the gondola for models S50–750 of Goldwind wind turbines installed in the Wind Farm Gibara II. It allows the early diagnosis of incipient failures that occur in the studied devices because of the high temperatures generated in its components under the operating conditions of Cuba. It works in the obtention of the values of the thermal state using forecast statistics and computer-aided design and engineering software (CAD-CAE), such as SolidWorks and its Flow Simulation add-on. This research supports its theories and postulates in the study of six assembled devices of Chinese origin, which have been in operation for nine years. For that purpose, we used a database that collects the temperature measurements in different working conditions and points inside the gondola.},
  internal-note = {literal ampersand inside keywords should be escaped (\&) if this file is consumed by classic BibTeX; kept verbatim from the export},
}
@article{Becerra2020a,
  author   = {Becerra, Miguel A. and Lasso-Arciniegas, Laura and Viveros, Andrés and Serna-Guarín, Leonardo and Peluffo-Ordóñez, Diego and Tobón, Catalina},
  title    = {Data Fusion and Information Quality for Biometric Identification from Multimodal Signals},
  journal  = {RISTI - Revista Iberica de Sistemas e Tecnologias de Informacao},
  year     = {2020},
  keywords = {Biometry,Data fusion,Information quality,Signal processing},
  url      = {https://search.proquest.com/docview/2385757504?pq-origsite=gscholar&fromopenview=true},
  abstract = {Biometric identification is carried out by processing physiological traits and signals. Biometrics systems are an open field of research and development, since they are permanently susceptible to attacks demanding permanent development to maintain their confidence. The main objective of this study is to analyze the effects of the quality of information on biometric identification and consider it in access control systems. This paper proposes a data fusion model for the development of biometrics systems considering the assessment of information quality. This proposal is based on the JDL (Joint Directors of Laboratories) data fusion model, which includes raw data processing, pattern detection, situation assessment and risk or impact. The results demonstrated the functionality of the proposed model and its potential compared to other traditional identification models.},
}
@inproceedings{Maya-Olalla2020,
  author    = {Maya-Olalla, Edgar and Domínguez-Limaico, Hernán and Vásquez-Ayala, Carlos and Jaramillo-Vinueza, Edgar and Zambrano V, Marcelo and Jácome-Ortega, Alexandra and Rosero-Montalvo, Paul D. and Peluffo-Ordóñez, D. H.},
  title     = {A New Approach of Service Platform for Water Optimization in Lettuce Crops Using Wireless Sensor Network},
  booktitle = {Advances in Intelligent Systems and Computing},
  year      = {2020},
  doi       = {10.1007/978-3-030-52249-0_29},
  url       = {https://link.springer.com/chapter/10.1007/978-3-030-52249-0_29},
  keywords  = {Cloud Computing,Irrigation,Precision agriculture,WSN},
  abstract  = {Wireless sensor network is implemented and communicated with the cloud through IPv6. The entire system is applied to precision irrigation systems for lettuce crops in Ecuador. The main objective is to provide optimization system for irrigation water for productive purposes and providing crops with the adequate amount of water needed for surviving and producing. To do that the system has a data acquisition system by sensors and this data is stored in web services. By improving the irrigation system crops can be planted throughout the year including summer, the system has a remarkable result for efficient water savings and lettuce crops.},
  internal-note = {exported doi (10.1007/978-3-030-29513-4_1) did not match the linked chapter; corrected to 10.1007/978-3-030-52249-0_29 per the Springer URL -- confirm. "Zambrano V, Marcelo" likely an abbreviated second surname; verify. Booktitle holds the series name; replace with the actual volume title},
}
@article{Becerra2020,
  author   = {Becerra, Miguel A. and Delgado-Trejos, Edilson and Mejía-Arboleda, Cristian and Peluffo-Ordóñez, Diego H. and Umaquinga-Criollo, Ana C.},
  title    = {Stochastic- and Neuro-Fuzzy-Analysis-Based Characterization and Classification of 4-Channel Phonocardiograms for Cardiac Murmur Detection},
  journal  = {RISTI - Revista Iberica de Sistemas e Tecnologias de Informacao},
  year     = {2020},
  keywords = {ANFIS,Cardiac murmur,Empirical mode decomposition,Hidden markov models,Phonocardiogram},
  url      = {https://search.proquest.com/docview/2451419849/fulltextPDF/F4AF5E590BD14D5EPQ/8},
  abstract = {Cardiac murmurs (CMs) are the most common heart’s diseases that are typically diagnosed from phonocardiogram (PCG) and echocardiogram tests-often supported by computerized systems. Research works have traditionally addressed the automatic CM diagnosis with no distinctively use of the four auscultation areas (one of each cardiac valve), resulting-most probably-in a constrained, nonimpartial diagnostic procedure. This study presents a comparison among four different CM detection systems from a 4-channel PCG. We first evaluate the acoustic characteristics derived from Mel-Frequency Cepstral Coefficients, Empirical Mode Decomposition (EMD), and statistical measures. Secondly, a relevance analysis is carried out using Fuzzy Rough Feature Selection. Thirdly, Hidden Markov Models (HMM), Adaptative Neuro-Fuzzy Inference System (ANFIS), Naïve Bayes, and Gaussian Mixture Model were applied for classification and validated using a 50-fold cross-validation procedure with a 70/30 split demonstrating the functionality and capability of EMD, Hidden Markov Model and ANFIS for CM classification.},
}
@incollection{Patino-Alarcon2020,
  author    = {Patiño-Alarcón, Delio R. and Patiño-Alarcón, Fernando A. and Lorente-Leyva, Leandro L. and Peluffo-Ordóñez, Diego H.},
  title     = {Clustering of Reading Ability Performance Variables in the {English} Language Based on {TBL} Methodology and Behavior in the Left Hemisphere of the Brain},
  booktitle = {Communications in Computer and Information Science},
  year      = {2020},
  doi       = {10.1007/978-3-030-62833-8_7},
  url       = {https://link.springer.com/chapter/10.1007/978-3-030-62833-8_7},
  abstract  = {This research presents an application of the clustering based on Thinking Based - Learning methodology (TBL), which offers guidelines to promote students’ reflective thinking. Within this methodology, the Intelligence Execution Theory (IET) tool will be used to encourage this kind of thinking in the classroom. Having in mind that, in any educational process, methodologies and pedagogical tools have a pivotal role as they are one of the bases for optimizing cognitive intelligence. In this case, it was given a priority to the potential development of a specific linguistic skill. This study presented a mixed methodology with an exploratory and descriptive scope. The main objective of this research was the clustering of the variables of functioning of the reading ability in the English language based on the TBL methodology and its behavior in the left hemisphere of the brain, specifically to analyze the improvement of the reading ability in the English language of the participants of this case study. With the expectation of generating sustainability of adequate levels of performance, instruction and learning of the English language of students at all levels.},
  internal-note = {Mendeley export had the chapter title in "chapter" and the series name in "title"; swapped into standard fields. Booktitle holds the CCIS series name -- replace with the actual volume title and add volume/publisher},
}
@inproceedings{Maya-Olalla2020a,
  author    = {Maya-Olalla, Edgar and Dominguez-Limaico, Mauricio and Meneses-Narvaez, Santiago and Rosero-Montalvo, Paul D. and Narvaez-Pupiales, Sandra and Zambrano Vizuete, Marcelo and Peluffo-Ordóñez, Diego H.},
  title     = {Design and Tests to Implement Hyperconvergence into a DataCenter: Preliminary Results},
  booktitle = {Advances in Intelligent Systems and Computing},
  year      = {2020},
  doi       = {10.1007/978-3-030-32022-5_6},
  url       = {https://link.springer.com/chapter/10.1007/978-3-030-32022-5_6},
  keywords  = {Availability,Data Center,Hyperconvergence,Proxmox},
  abstract  = {Hyperconvergence is a new technological trend that integrates and centralizes the functions of network, storage and computing in a single infrastructure, facilitating the administration, operability and scalability of a Data Center as a whole, benefits that do not provide an architecture of traditional network or virtualization-specific technologies. This research based on qualitative and experimental methods suggests a model of Implementation of a HyperConvergent Architecture for the management of the Data Center of the Universidad Técnica del Norte, as a competitive and high-performance Open Source alternative for the integration of physical and virtual components. The suggested deployment model is based on the virtualization platform Proxmox VE, CEPH (Storage Software Platform), vSwitch (network scheme) and KVM (equipment virtualization). It includes a centralized domain and it provides a 99.88% availability rate making it in total harmony with functionalities requiring high availability. The results show the simplicity of the system: efficient execution of all applications, migrations of virtual machines from node to node, inactivity times between 50.3 ms and 53 ms, processing acceleration providing agility to IT operations without forgetting that its implementation and its start-up times are relatively low.},
  internal-note = {abstract had dropped-ligature artifacts ("benets", "virtualization-specic"); restored. "Dominguez-Limaico" lacks the accent used in the sibling entry ("Domínguez-Limaico") -- verify preferred spelling},
}
@article{Rosero-Montalvo2020a,
  author   = {Rosero-Montalvo, Paul D. and Lopez-Batista, Vivian F. and Peluffo-Ordonez, Diego H.},
  title    = {Hybrid Embedded-Systems-Based Approach to In-Driver Drunk Status Detection Using Image Processing and Sensor Networks},
  journal  = {IEEE Sensors Journal},
  year     = {2020},
  doi      = {10.1109/JSEN.2020.3038143},
  url      = {https://ieeexplore.ieee.org/document/9258992},
  abstract = {Car drivers under the influence of alcohol is one of the most common causes of road traffic accidents. To tackle this issue, an emerging, suitable alternative is the use of intelligent systems -traditionally based on either sensor networks or artificial vision- that are aimed to prevent starting the car when drunk status on the car driver is detected. In such vein, this paper introduces a system whose main objective is identifying a person having alcohol in the blood through supervised classification of sensor-generated and computer-vision-based data. To do so, some drunk-status criteria are considered, namely: the concentration of alcohol in the car environment, the facial temperature of the driver and the pupil width. Specifically, for data acquisition purposes, the proposed system incorporates a gas sensor, temperature sensor and a digital camera. Acquired data are analyzed into a two-stages machine learning system consisting of feature selection and supervised classification algorithms. Both acquisition and analysis stages are to be performed into a embedded system, and therefore all procedures and algorithms are designed to work at low-computational resources. As a remarkable outcome, due mainly to the incorporation of feature selection and relevance analysis stages, proposed approach reaches a classification performance of 98% while ensures adequate operation conditions for the embedded system.},
  internal-note = {DOI suffix uppercased to the publisher's canonical form (DOIs are case-insensitive); exported entry used unaccented author spellings here -- sibling entries use "Peluffo-Ordóñez"/"López-Batista"; kept as exported since IEEE may have published them unaccented -- verify. Missing volume/number/pages (likely early-access at export time)},
}
@article{Becerra2020b,
  author   = {Becerra, Miguel A. and Londoño-Montoya, Erika and Serna-Guarín, Leonardo and Peluffo-Ordóñez, Diego and Tobón, Catalina and Giraldo, Lillyana},
  title    = {Structural Capital Model for Universities Based on {JDL} Data Fusion Model and Information Quality},
  journal  = {RISTI - Revista Iberica de Sistemas e Tecnologias de Informacao},
  year     = {2020},
  keywords = {Data fusion,Information quality,Intelectual capital,JDL model,Structural capital},
  url      = {https://search.proquest.com/docview/2394535766},
  abstract = {Intellectual capital is one of the most critical intangible active assets for universities, and there are multiple models to value it through the human, structural, and relational components. However, this is an open field of research that still demands new solutions to assess it effectively from each of its components. For the assessment of the structural component in higher education institutions, this study proposes a model that combines the assessment of the quality of information and the JDL data fusion model (joint directors of laboratories), which has been used in applications military. The proposed model is original in the methods used and their association, distributed in six levels that execute the pre-processing of the information, valuation of objects, valuation of the situation and the risk, and the refinement of the process. Besides, it evaluates the quality of the information, its traceability, and context to refine the process and obtain a more objective assessment taking into account the imperfection of the information for decision-making in the management of impact and risk. The model not only allows the assessment of structural capital, but also supports decision-making based on the quality of information and its impact. The functionality of the model is described by levels.},
  internal-note = {keyword "Intelectual capital" kept verbatim from the export (likely "Intellectual") -- verify against the journal record},
}
@inproceedings{ title = {A Data-Driven Approach for Automatic Classification of Extreme Precipitation Events: Preliminary Results}, type = {inproceedings}, year = {2020}, keywords = {Data driven,Extreme precipitation,Feature selection,Forecasting,PCA,Relief,SVM}, websites = {https://link.springer.com/chapter/10.1007/978-3-030-61702-8_14}, id = {8bba44a7-bd1e-3dc7-b2ce-0f1b8d50538d}, created = {2020-12-29T22:52:13.337Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:34.601Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Gonzalez-Vergara2020}, private_publication = {false}, abstract = {Even though there exists no universal definition, in the South America Andean Region, extreme precipitation events can be referred to the period of time in which standard thresholds of precipitation are abruptly exceeded. Therefore, their timely forecasting is of great interest for decision makers from many fields, such as: urban planning entities, water researchers and in general, climate related institutions. In this paper, a data-driven study is performed to classify and anticipate extreme precipitation events through hydroclimate features. Since the analysis of precipitation-events-related time series involves complex patterns, input data requires undergoing both pre-processing steps and feature selection methods, in order to achieve a high performance at the data classification stage itself. In this sense, in this study, both individual Principal Component Analysis (PCA) and Regresional Relief (RR) as well as a cascade approach mixing both are considered. Subsequently, the classification is performed by a Support-Vector-Machine-based classifier (SVM). Results reflect the suitability of an approach involving feature selection and classification for precipitation events detection purposes. 
A remarkable result is the fact that a reduced dataset obtained by applying RR mixed with PCA discriminates better than RR alone but does not significantly enhance the SVM rate at two- and three-class problems as done by PCA itself.}, bibtype = {inproceedings}, author = {González-Vergara, J. and Escobar-González, D. and Chaglla-Aguagallo, D. and Peluffo-Ordóñez, D. H.}, doi = {10.1007/978-3-030-61702-8_14}, booktitle = {Communications in Computer and Information Science} }
@article{ title = {A data set for electric power consumption forecasting based on socio-demographic features: Data from an area of southern Colombia}, type = {article}, year = {2020}, keywords = {Electric power consumption,Forecasting,Machine learning,Smart grid,Socio-demographic data}, websites = {https://www.sciencedirect.com/science/article/pii/S2352340920301402}, id = {5870a7ea-adc1-3e96-9f37-b16dab7c64e5}, created = {2020-12-29T22:52:14.175Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:34.291Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Parraga-Alava2020}, private_publication = {false}, abstract = {In this article, we introduce a data set concerning electric-power consumption-related features registered in seven main municipalities of Nariño, Colombia, from December 2010 to May 2016. The data set consists of 4427 socio-demographic characteristics, and 7 power-consumption-referred measured values. Data were fully collected by the company Centrales Eléctricas de Nariño (CEDENAR) according to the client consumption records. Power consumption data collection was carried following a manual procedure wherein company workers are in charge of manually registering the readings (measured in kWh) reported by the electric energy meters installed at each housing/building. Released data set is aimed at providing researchers a suitable input for designing and assessing the performance of forecasting, modelling, simulation and optimization approaches applied to electric power consumption prediction and characterization problems. The data set, so-named in shorthand PCSTCOL, is freely and publicly available at https://doi.org/10.17632/xbt7scz5ny.3.}, bibtype = {article}, author = {Parraga-Alava, Jorge and Moncayo-Nacaza, Jorge Dario and Revelo-Fuelagán, Javier and Rosero-Montalvo, Paul D. 
and Anaya-Isaza, Andrés and Peluffo-Ordóñez, Diego Hernán}, doi = {10.1016/j.dib.2020.105246}, journal = {Data in Brief} }
@inproceedings{ title = {Inverse Data Visualization Framework (IDVF): Towards a Prior-Knowledge-Driven Data Visualization}, type = {inproceedings}, year = {2020}, keywords = {Data visualization,Dimensionality reduction,Interaction model,Kernel functions}, websites = {https://link.springer.com/chapter/10.1007/978-3-030-61702-8_19}, id = {85eeb197-aabc-382f-b001-5b0a7b807e3a}, created = {2020-12-29T22:52:16.133Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:32.887Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Velez-Falconi2020}, private_publication = {false}, abstract = {Broadly, the area of dimensionality reduction (DR) is aimed at providing ways to harness high dimensional (HD) information through the generation of lower dimensional (LD) representations, by following a certain data-structure-preservation criterion. In literature there have been reported dozens of DR techniques, which are commonly used as a pre-processing stage within exploratory data analyses for either machine learning or information visualization (IV) purposes. Nonetheless, the selection of a proper method is a nontrivial and -very often- toilsome task. In this sense, a readily and natural way to incorporate an expert’s criterion into the analysis process, while making this task more tractable is the use of interactive IV approaches. Regarding the incorporation of experts’ prior knowledge there still exists a range of open issues. In this work, we introduce a here-named Inverse Data Visualization Framework (IDVF), which is an initial approach to make the input prior knowledge directly interpretable. Our framework is based on 2D-scatter-plots visuals and spectral kernel-driven DR techniques. 
To capture either the user’s knowledge or requirements, users are requested to provide changes or movements of data points in such a manner that resulting points are located where best convenient according to the user’s criterion. Next, following a Kernel Principal Component Analysis approach and a mixture of kernel matrices, our framework accordingly estimates an approximate LD space. Then, the rationale behind the proposed IDVF is to adjust as accurate as possible the resulting LD space to the representation fulfilling users’ knowledge and requirements. Results are greatly promising and open the possibility to novel DR-based visualizations approaches.}, bibtype = {inproceedings}, author = {Vélez-Falconí, M. and González-Vergara, J. and Peluffo-Ordóñez, D. H.}, doi = {10.1007/978-3-030-61702-8_19}, booktitle = {Communications in Computer and Information Science} }
@article{ title = {Environment monitoring of rose crops greenhouse based on autonomous vehicles with a wsn and data analysis}, type = {article}, year = {2020}, keywords = {Ambient intelligence,Autonomous vehicles,Monitoring systems,Roses crops,Wireless sensor networks}, websites = {https://www.mdpi.com/1424-8220/20/20/5905}, id = {e7d09b03-f09e-3856-b403-df7348835bd3}, created = {2020-12-29T22:52:16.134Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:32.916Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Rosero-Montalvo2020}, private_publication = {false}, abstract = {This work presents a monitoring system for the environmental conditions of rose flower-cultivation in greenhouses. Its main objective is to improve the quality of the crops while regulating the production time. To this end, a system consisting of autonomous quadruped vehicles connected with a wireless sensor network (WSN) is developed, which supports the decision-making on type of action to be carried out in a greenhouse to maintain the appropriate environmental conditions for rose cultivation. A data analysis process was carried out, aimed at designing an in-situ intelligent system able to make proper decisions regarding the cultivation process. This process involves stages for balancing data, prototype selection, and supervised classification. The proposed system produces a significant reduction of data in the training set obtained by the WSN while reaching a high classification performance in real conditions—amounting to 90 % and 97.5%, respectively. As a remarkable outcome, it is also provided an approach to ensure correct planning and selection of routes for the autonomous vehicle through the global positioning system.}, bibtype = {article}, author = {Rosero-Montalvo, Paul D. and Erazo-Chamorro, Vanessa C. and López-Batista, Vivian F. and Moreno-García, María N. 
and Peluffo-Ordóñez, Diego H.}, doi = {10.3390/s20205905}, journal = {Sensors (Switzerland)} }
@inproceedings{ title = {Introducing the Concept of Interaction Model for Interactive Dimensionality Reduction and Data Visualization}, type = {inproceedings}, year = {2020}, keywords = {Data visualization,Dimensionality reduction,Interaction model,Kernel functions}, websites = {https://link.springer.com/chapter/10.1007/978-3-030-58802-1_14}, id = {2bbfcc2e-1355-3d8d-8f82-4a4044d13a3d}, created = {2020-12-29T22:52:16.206Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-08-11T22:46:31.373Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Ortega-Bustamante2020}, folder_uuids = {15b32c16-cda9-4551-a173-303b5217df48}, private_publication = {false}, abstract = {This letter formally introduces the concept of interaction model (IM), which has been used either directly or tangentially in previous works but never defined. Broadly speaking, an IM consists of the use of a mixture of dimensionality reduction (DR) techniques within an interactive data visualization framework. The rationale of creating an IM is the need for simultaneously harnessing the benefit of several DR approaches to reach a data representation being intelligible and/or fitted to any user’s criterion. As a remarkable advantage, an IM naturally provides a generalized framework for designing both interactive DR approaches as well as readily-to-use data visualization interfaces. In addition to a comprehensive overview on basics of data representation and dimensionality reduction, the main contribution of this manuscript is the elegant definition of the concept of IM in mathematical terms.}, bibtype = {inproceedings}, author = {Ortega-Bustamante, M. C. and Hasperué, W. and Peluffo-Ordóñez, D. H. and Paéz-Jaime, M. and Marrufo-Rodríguez, I. and Rosero-Montalvo, P. and Umaquinga-Criollo, A. C. 
and Vélez-Falconi, M.}, doi = {10.1007/978-3-030-58802-1_14}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inproceedings{ title = {A New Approach to Supervised Data Analysis in Embedded Systems Environments: A Case Study}, type = {inproceedings}, year = {2020}, keywords = {Data analysis,Embedded systems,Sensor data}, websites = {https://link.springer.com/chapter/10.1007/978-3-030-52249-0_29}, id = {f927963d-541b-30f8-9642-1d45d8d880b7}, created = {2020-12-29T22:52:16.345Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:33.080Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Godoy-Trujillo2020}, private_publication = {false}, abstract = {Nowadays, the implementation of embedded systems with sensors for massive data collection has become widely used for their flexibility and improvement in decision making. However, this process can be affected by errors in reading, attrition of systems, among others. For this, a selection approach of supervised algorithms with a prototypes selection criterion is presented, which allows an adequate embedded system performance. To do that a quality measure was established which compromises between the data reduction of the training set, algorithm processing time and the classification performance. As a result, it was determined that the algorithm for the data selection is Condensed Nearest Neighbors (CNN) and the classification algorithm is k-Nearest Neighbour (k-NN).}, bibtype = {inproceedings}, author = {Godoy-Trujillo, Pamela E. and Rosero-Montalvo, Paul D. and Suárez-Zambrano, Luis E. and Peluffo-Ordoñez, Diego H. and Revelo-Fuelagán, E. J.}, doi = {10.1007/978-3-030-52249-0_29}, booktitle = {Advances in Intelligent Systems and Computing} }
@inproceedings{ title = {Interactive Visualization Interfaces for Big Data Analysis Using Combination of Dimensionality Reduction Methods: A Brief Review}, type = {inproceedings}, year = {2020}, keywords = {Big data,Business intelligence,Data mining,Dimensionality reduction,Interactive interface}, websites = {https://link.springer.com/chapter/10.1007/978-3-030-37221-7_17}, id = {dc748467-947b-31be-861c-2f8e294e6bc8}, created = {2020-12-29T22:52:17.082Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:32.773Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Umaquinga-Criollo2020a}, private_publication = {false}, abstract = {The Big Data analysis allows to generate knowledge based on mathematical models that surpass human capabilities, and therefore it is necessary to have robust computer systems. In this connection, the dimensionality reduction (DR) allows to perform approximations to make data perceptible in a simple and compact way while also the computational cost is reduced. Additionally, interactive interfaces enable the user to work with algorithms involving complex mathematical and statistical processes typically aimed at providing weighting factors to each DR algorithm to find the best way to represent data at a low dimension. In this study, a bibliographic review of the different models of interactive interfaces for the analysis of Big Data using DR is presented, by considering different, existing proposals and approaches on how to display the information. Particularly, those approaches based on mental processes and uses of color along with an intuitive handling are of special interest.}, bibtype = {inproceedings}, author = {Umaquinga-Criollo, Ana C. and Peluffo-Ordóñez, Diego H. and Rosero-Montalvo, Paúl D. and Godoy-Trujillo, Pamela E. 
and Benítez-Pereira, Henry}, doi = {10.1007/978-3-030-37221-7_17}, booktitle = {Advances in Intelligent Systems and Computing} }
@article{ title = {Comparison of controllers and mathematical modeling of a magnetic levitator}, type = {article}, year = {2020}, keywords = {Comparison,Feedback Controller,Magnetic Levitator,Modeling,NARMA}, websites = {https://search.proquest.com/docview/2350120753}, id = {1373822f-2ba4-31a7-9e2c-b1bec7d4b94f}, created = {2021-02-24T14:43:08.381Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-10-23T00:51:57.187Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Herrera-Granda2020}, private_publication = {false}, abstract = {This work presents the mathematical modeling and simulation of a magnetic levitator, and a comparison of different control techniques, applied on the system, in order to visualize which technique better stabilizes the magnetic levitator. The dynamical modeling was done applying the Newton Euler’s formulation, and the obtained equations were represented on the state space. Then the system was linearized applying Taylor’s approximation, and the obtained matrices were used for the controller’s design. The employed controllers for the comparison were: Feedback Controller, Linear–quadratic regulator (LQR), and the neural-network based nonlinear autoregressive moving average controller (NARMA). Finally, the designed controllers and the plant were tested under several simulations using MATLAB and Simulink. The results proved that the three techniques were capable of stabilizing this particular system, and some significant advantages were found applying the NARMA and LQR techniques.}, bibtype = {article}, author = {Herrera-Granda, Erick P. and Herrera-Mayorga, Karla A. and Herrera-Granda, Israel D. and Martínez, Luz Marina Sierra and Peluffo-Ordoñez, Diego H.}, journal = {RISTI - Revista Iberica de Sistemas e Tecnologias de Informacao} }
@inproceedings{ title = {A Forecasting Model to Predict the Demand of Roses in an Ecuadorian Small Business Under Uncertain Scenarios}, type = {inproceedings}, year = {2020}, pages = {245-258}, websites = {http://link.springer.com/10.1007/978-3-030-64580-9_21}, publisher = {Lecture Notes in Computer Science}, id = {e7eb46c6-4b10-32f0-a8da-17ddc13368ff}, created = {2021-03-23T01:19:58.508Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-06-08T01:16:45.050Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {Ecuador is worldwide considered as one of the main natural flower producers and exporters –being roses the most salient ones. Such a fact has naturally led the emergence of small and medium sized companies devoted to the production of quality roses in the Ecuadorian highlands, which intrinsically entails resource usage optimization. One of the first steps towards optimizing the use of resources is to forecast demand, since it enables a fair perspective of the future, in such a manner that the in-advance raw materials supply can be previewed against eventualities, resources usage can be properly planned, as well as the misuse can be avoided. Within this approach, the problem of forecasting the supply of roses was solved into two phases: the first phase consists of the macro-forecast of the total amount to be exported by the Ecuadorian flower sector by the year 2020, using multi-layer neural networks. In the second phase, the monthly demand for the main rose varieties offered by the study company was micro-forecasted by testing seven models. In addition, a Bayesian network model is designed, which takes into consideration macroeconomic aspects, the level of employability in Ecuador and weather-related aspects. 
This Bayesian network provided satisfactory results without the need for a large amount of historical data and at a low-computational cost.}, bibtype = {inproceedings}, author = {Herrera-Granda, Israel D. and Lorente-Leyva, Leandro L. and Peluffo-Ordóñez, Diego H. and Alemany, M. M. E.}, doi = {10.1007/978-3-030-64580-9_21}, booktitle = {LOD 2020} }
@inbook{ type = {inbook}, year = {2020}, pages = {131-142}, websites = {http://link.springer.com/10.1007/978-3-030-64580-9_11}, publisher = {Lecture Notes in Computer Science}, id = {bbc1675c-f5b7-3bdd-85ba-2524bc7ea499}, created = {2021-03-23T01:19:58.508Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-03-29T19:40:34.694Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, private_publication = {false}, bibtype = {inbook}, author = {Lorente-Leyva, Leandro L. and Alemany, M. M. E. and Peluffo-Ordóñez, Diego H. and Herrera-Granda, Israel D.}, doi = {10.1007/978-3-030-64580-9_11}, chapter = {A Comparison of Machine Learning and Classical Demand Forecasting Methods: A Case Study of Ecuadorian Textile Industry} }
@article{ title = {Comparison of kernel functions for the prediction of the photovoltaic energy supply [Comparación de funciones kernel para la predicción de la oferta energética fotovoltaica]}, type = {article}, year = {2020}, pages = {310-324}, volume = {2020}, websites = {https://search.proquest.com/docview/2474915437/fulltextPDF/D88B81E498D44759PQ/1}, publisher = {Associacao Iberica de Sistemas e Tecnologias de Informacao}, id = {65bf4708-f68b-3119-89d1-fcc040dcf7c3}, created = {2021-03-29T21:34:07.106Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-03-29T21:34:07.106Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Mora-Paz2020310}, source_type = {article}, notes = {cited By 0}, private_publication = {false}, abstract = {Recently, the fields of climate change and energy demand have turned their attention to the study and discovery of patterns in renewable energies, such as the photovoltaic-type. Such patterns can be obtained by extrapolating radiation based on the electromagnetic spectrum bands captured by NASA’s Landsat and MODIS satellites, where artificial neural network (ANN) and support vector machine (SVM) algorithms have produced the best models. Nonetheless, the acquisition of training data from those sources is expensive, as well as it lacks the exploration of kernel functions for this application. Therefore, in this study, adjustments were made in the above aspects, mainly through: coupling of new kernels to ANN and SVM in the scikit-learn library, contributing to the reuse and robustness of these algorithms; and implementing an experimental framework to tune hyper-parameters, thus generating results comparable to those reported in the state of the art. © 2020, Associacao Iberica de Sistemas e Tecnologias de Informacao. 
All rights reserved.}, bibtype = {article}, author = {Mora-Paz, H and Riascos, J A and Salazar-Castro, J A and Mora, G and Pantoja, A and Revelo-Fuelagán, J and Mancera-Valetts, L and Peluffo-Ordoñez, D}, journal = {RISTI - Revista Iberica de Sistemas e Tecnologias de Informacao}, number = {E38} }
@inproceedings{ title = {Drowsiness Detection in Drivers Through Real-Time Image Processing of the Human Eye}, type = {inproceedings}, year = {2019}, keywords = {Alarm,Artificial intelligence,Drowsiness detection,Human eye,Image processing}, websites = {https://link.springer.com/chapter/10.1007/978-3-030-14799-0_54}, id = {2a67ea9f-5bb9-3093-8416-c0ec493c232b}, created = {2020-12-29T22:52:03.258Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:35.689Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Herrera-Granda2019}, private_publication = {false}, abstract = {At a global level, drowsiness is one of the main causes of road accidents causing frequent deaths and economic losses. To solve this problem an application developed in Matlab environment was made, which processes real time acquired images in order to determine if the driver is awake or drowsy. Using AdaBoost training Algorithm for Viola-Jones eyes detection, a cascade classifier finds the location and the area of the driver eyes in each frame of the video. Once the driver eyes are detected, they are analyzed whether are open or closed by color segmentation and thresholding based on the sclera binarized area. Finally, it was implemented as a drowsiness detection system which aims to prevent driver fall asleep while driving a vehicle by activating an audible alert, reaching speeds up to 14.5 fps.}, bibtype = {inproceedings}, author = {Herrera-Granda, Erick P. and Caraguay-Procel, Jorge A. and Granda-Gudiño, Pedro D. and Herrera-Granda, Israel D. and Lorente-Leyva, Leandro L. and Peluffo-Ordóñez, Diego H. and Revelo-Fuelagán, Javier}, doi = {10.1007/978-3-030-14799-0_54}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inbook{ type = {inbook}, year = {2019}, keywords = {Knee flexion,Motion analysis,Multisensor fusion,Orientation estimation}, pages = {184-199}, websites = {http://link.springer.com/10.1007/978-3-030-36636-0_14}, id = {de732fce-9ec4-3340-9cd7-f59ce3bffd82}, created = {2020-12-29T22:52:03.372Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:35.898Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Mayorca-Torres2019}, private_publication = {false}, abstract = {One way to identify musculoskeletal disorders in the lower limb is through the functional examination where the ranges of normality of the joints are evaluated. Currently, this test can be performed with technological support, with optical sensors and inertial measurement sensors (IMU) being the most used. Kinect has been widely used for the functional evaluation of the human body, however, there are some limits to the movements made in the depth plane and when there is occlusion of the limbs. Inertial measurement sensors (IMU) allow orientation and acceleration measurements to be obtained with a high sampling rate, with some restrictions associated with drift. This article proposes a methodology that combines the acceleration measures of the IMU and kinect sensors in two planes of movement (Frontal and sagittal). These measurements are filtered in the preprocessing stage according to a Kalman filter and are obtained from a mathematical equation that allows them to be merged. The fusion system data obtains acceptable RMS error values of 5.5 and an average consistency of 92.5% for the sagittal plane with respect to the goniometer technique. The data is shown through an interface that allows the visualization of knee joint kinematic data, as well as tools for the analysis of signals by the health professional.}, bibtype = {inbook}, author = {Mayorca-Torres, D. and Caicedo-Eraso, Julio C. 
and Peluffo-Ordoñez, Diego H.}, doi = {10.1007/978-3-030-36636-0_14}, chapter = {Method for the Improvement of Knee Angle Accuracy Based on Kinect and IMU: Preliminary Results}, title = {Communications in Computer and Information Science} }
@inbook{ type = {inbook}, year = {2019}, keywords = {Bayes,Case-based reasoning,Classification,Parametric,Probability}, pages = {339-350}, websites = {http://link.springer.com/10.1007/978-3-030-14799-0_29}, id = {18ceb3e1-454b-3ce6-9f00-01609ee5a237}, created = {2020-12-29T22:52:03.831Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:35.079Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {BastidasTorres2019}, private_publication = {false}, abstract = {When searching for better solutions that improve the medical diagnosis accuracy, Case-Based reasoning systems (CBR) arise as a good option. This article seeks to improve these systems through the use of parametric and non-parametric probability estimation methods, particularly, at their recovery and adaptation stages. To this end, a set of experiments are conducted with two essentially different, medical databases (Cardiotocography and Cleveland databases), in order to find good parametric and non-parametric estimators. The results are remarkable as a high accuracy rate is achieved when using explored approaches: Naive Bayes and Nearest Neighbors (K-NN) estimators. In addition, a decrease on the involved processing time is reached, which suggests that proposed estimators incorporated into the recovery and adaptation stage becomes suitable for CBR systems, especially when dealing with support for medical diagnosis applications.}, bibtype = {inbook}, author = {Bastidas Torres, D. and Piñeros Rodriguez, C. and Peluffo-Ordóñez, Diego H. and Blanco Valencia, X. and Revelo-Fuelagán, Javier and Becerra, M. A. and Castro-Ospina, A. E. 
and Lorente-Leyva, Leandro L.}, doi = {10.1007/978-3-030-14799-0_29}, chapter = {Adaptation and Recovery Stages for Case-Based Reasoning Systems Using Bayesian Estimation and Density Estimation with Nearest Neighbors}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inbook{ type = {inbook}, year = {2019}, keywords = {Alcohol detection,Drunk detection,Prototype selection,Sensors,Supervised classification}, pages = {234-243}, websites = {http://link.springer.com/10.1007/978-3-030-19651-6_23}, id = {87613764-d87a-3100-90c6-5c39b87698bb}, created = {2020-12-29T22:52:03.834Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:35.090Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Rosero-Montalvo2019a}, private_publication = {false}, abstract = {This work presents a system for detecting excess alcohol in drivers to reduce road traffic accidents. To do so, criteria such as the alcohol concentration in the environment, the facial temperature of the driver, and the width of the pupil are considered. To measure the corresponding variables, the data acquisition procedure uses sensors and artificial vision. Subsequently, data analysis is performed into stages for prototype selection and supervised classification algorithms. Accordingly, the acquired data can be stored and processed in a system with low-computational resources. As a remarkable result, the amount of training samples is significantly reduced, while an admissible classification performance is achieved - reaching then suitable settings regarding the given device’s conditions.}, bibtype = {inbook}, author = {Rosero-Montalvo, Paul D. and López-Batista, Vivian F. and Peluffo-Ordóñez, Diego H. and Erazo-Chamorro, Vanessa C. and Arciniega-Rocha, Ricardo P.}, doi = {10.1007/978-3-030-19651-6_23}, chapter = {Multivariate Approach to Alcohol Detection in Drivers by Sensors and Artificial Vision}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inproceedings{Ordonez-Bolanos2019, title = {Recognition of emotions using ICEEMD-based characterization of multimodal physiological signals}, type = {inproceedings}, year = {2019}, pages = {113--116}, websites = {https://ieeexplore.ieee.org/document/8667585/}, month = feb, publisher = {IEEE}, id = {7c104a31-f324-3ca1-9d38-47419bdedd8f}, created = {2020-12-29T22:52:03.949Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:35.342Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Ordonez-Bolanos2019}, private_publication = {false}, abstract = {Physiological-signal-Analysis-based approaches are typically used for automatic emotion identification. Given the complex nature of signals-related emotions, their right identification often results in a non-Trivial and exhaustive process-especially because such signals suffer from high dependence upon multiple external variables. Some emotional criteria of interest are arousal, valence, and dominance. Several research works have addressed this issue, mainly through creating prediction systems, notwithstanding, due to aspects such as accuracy, in-context interpretation and computational cost, it is still considered a great-of-interest, open research field. This paper is aimed at verifying the usefulness of the so-called improved complete empirical mode decomposition (ICEEMD) as a physiological-signal-characterization building block within an emotion-predicting system. To this purpose, some physiological signals along with patients' metadata from the DEAP database are considered. The experiments are set-up as follows: Signals are pre-processed by amplitude adjusting and simple filtering. Then, a feature set is built using HC, and multiple statistic measures from information given by the three considered decompositions, namely: ICEEMD, discrete wavelet transform (DWT), and Maximal overlap DWT. Subsequently, Relief F selection algorithm was applied for reducing the dimensionality of the feature space. Finally, classifiers (LDC and K-NN cascade architectures) are used to assess the class-separability given by the feature set. The different decomposition techniques were compared, and the relevant signals and measures were established. Experimental results evidence the suitability of ICEEMD decomposition for physiological-signal-driven emotions analysis.}, bibtype = {inproceedings}, author = {Ordonez-Bolanos, O. A. and Gomez-Lara, J. F. and Becerra, M. A. and Peluffo-Ordonez, D. H. and Duque-Mejia, C. M. and Medrano-David, D. and Mejia-Arboleda, C.}, doi = {10.1109/LASCAS.2019.8667585}, booktitle = {2019 IEEE 10th Latin American Symposium on Circuits & Systems (LASCAS)} }
@article{Revelo2019, title = {Satellite-image-based crop identification using unsupervised machine learning techniques: Preliminary results}, type = {article}, year = {2019}, keywords = {Landsat satellite,Max-min algorithm,Parzen’s probability density function,Satellite image}, websites = {https://search.proquest.com/openview/07a5294795bdf4c5423a32a23b32a228}, id = {fb5b6f70-b50e-37d0-ae74-26eb26e475ec}, created = {2020-12-29T22:52:04.065Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:35.330Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Revelo2019}, private_publication = {false}, abstract = {Artificial vision and image processing have been widely used in the field of scientific research related to satellite landscapes with purposes, like soil classification, detection of changes in urban and rural areas, among others. The existing prototypes have reported meaningful results, notwithstanding, the implementation of a system more properly fitting the nature of the images by taking into account factors such as lighting control, noise reduction and presence of clouds is still an open and of-great-interest problem. This paper presents an initial satellite image processing methodology for clustering crops. The proposed methodology is as follows: Firstly, data pre-processing is carried out, followed by a feature extraction stage. Secondly, image clustering is performed by means of a probabilistic algorithm. This methodology is validated with the Campo Verde database built over crops from a Brazil’s area. Our approach reaches a classification percentage 87.97%, sensitivity 87.1%, specificity 97.22 and f1_score 71.78 %.}, bibtype = {article}, author = {Revelo, Mónica Yolanda Moreno and Gómez Menoza, Juan Bernardo and Peluffo-Ordóñez, Diego Hernán}, journal = {RISTI - Revista Iberica de Sistemas e Tecnologias de Informacao} }
@inbook{Rosero-Montalvo2019b, type = {inbook}, year = {2019}, keywords = {Data analysis,Environmental monitoring,Environmental science computing,Intelligent system}, pages = {686--696}, websites = {http://link.springer.com/10.1007/978-3-030-29859-3_58}, id = {a7a58608-dae5-3d4c-8593-426651695c39}, created = {2020-12-29T22:52:04.532Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:36.463Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Rosero-Montalvo2019b}, private_publication = {false}, abstract = {This work presents a new approach to the Internet of Things (IoT) between sensor nodes and data analysis with visualization platform with the purpose to acquire urban pollution data. The main objective is to determine the degree of contamination in Ibarra city in real time. To do this, for one hand, thirteen IoT devices have been implemented. For another hand, a Prototype Selection and Data Balance algorithms comparison in relation to the classifier k-Nearest Neighbourhood is made. With this, the system has an adequate training set to achieve the highest classification performance. As a final result, the system presents a visualization platform that estimates the pollution condition with more than 90% accuracy.}, bibtype = {inbook}, author = {Rosero-Montalvo, Paul D. and López-Batista, Vivian F. and Peluffo-Ordóñez, Diego H. and Lorente-Leyva, Leandro L. and Blanco-Valencia, X. P.}, doi = {10.1007/978-3-030-29859-3_58}, chapter = {Urban Pollution Environmental Monitoring System Using IoT Devices and Data Visualization: A Case Study}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@article{Encalada-Grijalva2019, title = {Medical dispenser of control and monitoring services for the elderly health care institute {Hogar del Anciano “San Vicente de Paúl”} from {Atuntaqui} (Ecuador)}, type = {article}, year = {2019}, keywords = {Arduino,Database,Elderly,Electronic Dispenser,MySQL}, websites = {https://search.proquest.com/openview/ca355d43bc1ac0d2c3428cf5f1030ce1/1?pq-origsite=gscholar&cbl=1006393}, id = {53d1102c-7845-3971-a58a-3f690a4bba62}, created = {2020-12-29T22:52:05.265Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:36.498Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Encalada-Grijalva2019}, private_publication = {false}, abstract = {Aging is a stage of life of the human being that presents psychological, social, physiological changes, as well as the deterioration of their physical capacities. Elderly Home “San Vicente de Paúl” of the city of Atuntaqui (Ecuador), older adults take medicines to treat diseases such as high blood pressure, osteoarthritis, osteoporosis, cognitive deterioration, depression, hypothyroidism, gastritis, anemia, among others. The medical staff, because they do not keep a record of the dosage of medications in the pre-established hours, has caused delays, omissions in the same and a decline in their health status. The present research designs and develops an embedded system to control and monitor the supply of medicines through two types of notifications to the medical department: In the first instance, by sending text messages via GSM; in the absence of a response, the second notification is issued by means of a voice call to the nurse on duty in order to safeguard the adequate intake of medicine and comply fully with the treatments prescribed by the doctor and improve the quality of life of the adult higher.}, bibtype = {article}, author = {Encalada-Grijalva, María T. and Narváez-Pupiales, Sandra K. and Umaquinga-Criollo, Ana C. and Suárez-Zambrano, Luis E. and Peluffo-Ordóñez, Diego H.}, journal = {RISTI - Revista Iberica de Sistemas e Tecnologias de Informacao} }
@article{Rosero-Montalvo2019c, title = {Intelligent system for identification of wheelchair user's posture using machine learning techniques}, type = {article}, year = {2019}, keywords = {Embedded system,K-nearest neighbors,Kennard-stone,posture detection,principal component analysis}, websites = {https://ieeexplore.ieee.org/document/8565996}, id = {fb80a8a0-02bb-3d7f-a0df-1e23ffc1129f}, created = {2020-12-29T22:52:11.963Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:33.781Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Rosero-Montalvo2019c}, private_publication = {false}, abstract = {This paper presents an intelligent system aimed at detecting a person's posture when sitting in a wheelchair. The main use of the proposed system is to warn an improper posture to prevent major health issues. A network of sensors is used to collect data that are analyzed through a scheme involving the following stages: Selection of prototypes using condensed nearest neighborhood rule (CNN), data balancing with the Kennard-Stone algorithm, and reduction of dimensionality through principal component analysis. In doing so, acquired data can be both stored and processed into a micro controller. Finally, to carry out the posture classification over balanced, pre-processed data, and the K-nearest neighbors algorithm is used. It turns to be an intelligent system reaching a good tradeoff between the necessary amount of data and performance is accomplished. As a remarkable result, the amount of required data for training is significantly reduced while an admissible classification performance is achieved being a suitable trade given the device conditions.}, bibtype = {article}, author = {Rosero-Montalvo, Paul D. and Peluffo-Ordóñez, Diego Hernán and López Batista, Vivian Félix and Serrano, Jorge and Rosero, Edwin A.}, doi = {10.1109/JSEN.2018.2885323}, journal = {IEEE Sensors Journal} }
@article{Londono-Montoya2019, title = {Classification system for corporate reputation based on financial variables}, type = {article}, year = {2019}, keywords = {Adaptive diffuse inference system,Corporate reputation,Optimization by particle swarm,Reputational index,Vector support machines}, websites = {https://search.proquest.com/openview/fc081b269b3464d65f6211b07c6ca1e5/}, id = {3f13c66b-534a-37f2-aef4-d6774f4d155b}, created = {2020-12-29T22:52:12.047Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:33.910Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Londono-Montoya2019}, private_publication = {false}, abstract = {The most important external assessment for companies is reputation, which is very difficult to calculate since its characterization may require a large number of qualitative and quantitative data. This study presents a comparison of different corporate reputation classification systems based on financial variables. Initially, a database was constructed using data from the Corporate Reputation Business Monitor and the Business Information and Reporting System of the Colombian Superintendence of Companies. The records were labeled as high and low. Then, a relevance analysis was carried out, using linear discriminant analysis. Four classifiers (ANFIS, K-NN, F-NN, and SVM-PSO) were compared to categorize the reputation, achieving a performance of 94% accuracy, which allowed to demonstrate the discriminant capacity of the financial variables to classify the reputation.}, bibtype = {article}, author = {Londoño-Montoya, Erika and Becerra, Miguel A. and Murillo-Escobar, Juan and Gómez-Bayona, Ledy and Moreno-López, Gustavo and Peluffo-Ordóñez, Diego}, journal = {RISTI - Revista Iberica de Sistemas e Tecnologias de Informacao} }
@article{Betancur-Delgado2019, title = {Public urban transport optimization by means of tabu search and {PSO} algorithms: {Medellín}, {Colombia}}, type = {article}, year = {2019}, keywords = {Bus scheduling problem,Particle swarm optimization,Tabu search optimization,Urban public transport: Route optimization}, websites = {https://search.proquest.com/openview/ec8601c82489c20f58286629e316c348}, id = {dcd326c3-2d6d-3419-8e5c-3e4e3aebb7c5}, created = {2020-12-29T22:52:12.314Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:34.203Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Betancur-Delgado2019}, private_publication = {false}, abstract = {Urban public transport in the city of Medellín (Col) has had a positive development, however insufficient due to the increase in population density. This paper presents a comparative analysis of the Tabu Search algorithm (TS) and the Particle Swarm Optimization algorithm (PSO). It proposes an optimization of the urban public transport service in the northern area of the city, using variables from different organizational units (vehicle mechanics, human resources management, environmental and operational management). The algorithms achieved convergence with the objective of maximizing profitability regarding the use of buses during the operating day. A route planning proposal was obtained that allows a user’s increment of 25%, improve service times, generating sustainable development for the environment and the transport company.}, bibtype = {article}, author = {Betancur-Delgado, Laura and Becerra, Miguel A. and Duque-Mejía, Carolina and Peluffo-Ordóñez, Diego and Álvarez-Uribe, Karla C.}, journal = {RISTI - Revista Iberica de Sistemas e Tecnologias de Informacao} }
@inbook{Lorente-Leyva2019, type = {inbook}, year = {2019}, keywords = {Forecasting,Genetic algorithm,Master Production Scheduling,Optimization,Production planning,Textile industry}, pages = {674--685}, websites = {http://link.springer.com/10.1007/978-3-030-29859-3_57}, id = {1ccef8c5-9657-34af-9b2e-9578fd0a47fb}, created = {2020-12-29T22:52:12.360Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:34.141Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Lorente-Leyva2019}, private_publication = {false}, abstract = {In a competitive environment, an industry’s success is directly related to the level of optimization of its processes, how production is planned and developed. In this area, the master production scheduling (MPS) is the key action for success. The object of study arises from the need to optimize the medium-term production planning system in a textile company, through genetic algorithms. This research begins with the analysis of the constraints, mainly determined by the installed capacity and the number of workers. The aggregate production planning is carried out for the T-shirts families. Due to such complexity, the application of bioinspired optimization techniques demonstrates their best performance, before industries that normally employ exact and simple methods that provide an empirical MPS but can compromise efficiency and costs. The products are then disaggregated for each of the items in which the MPS is determined, based on the analysis of the demand forecast, and the orders made by customers. From this, with the use of genetic algorithms, the MPS is optimized to carry out production planning, with an improvement of up to 96% of the level of service provided.}, bibtype = {inbook}, author = {Lorente-Leyva, Leandro L. and Murillo-Valle, Jefferson R. and Montero-Santos, Yakcleem and Herrera-Granda, Israel D. and Herrera-Granda, Erick P. and Rosero-Montalvo, Paul D. and Peluffo-Ordóñez, Diego H. and Blanco-Valencia, Xiomara P.}, doi = {10.1007/978-3-030-29859-3_57}, chapter = {Optimization of the Master Production Scheduling in a Textile Industry Using Genetic Algorithm}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inbook{Herrera-Granda2019a, type = {inbook}, year = {2019}, keywords = {Artificial neural networks,Long-term demand forecasting,Small business}, pages = {362--373}, websites = {http://link.springer.com/10.1007/978-3-030-20518-8_31}, id = {88bdba1f-df89-3f63-857c-fd01e349b056}, created = {2020-12-29T22:52:13.837Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:35.030Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Herrera-Granda2019a}, private_publication = {false}, abstract = {This paper shows a neural networks-based demand forecasting model designed for a small manufacturer of bottled water in Ecuador, which currently doesn’t have adequate demand forecast methodologies, causing problems of customer orders non-compliance, inventory excess and economic losses. However, by working with accurate predictions, the manufacturer will have an anticipated vision of future needs in order to satisfy the demand for manufactured products, in other words, to guarantee on time and reasonable use of the resources. To solve the problems that this small manufacturer has to face a historic demand data acquisition process was done through the last 36 months costumer order records. In the construction of the historical time series, that was analyzed, demand dates and volumes were established as input variables. Then the design of forecast models was done, based on classical methods and multi-layer neural networks, which were evaluated by means of quantitative error indicators. The application of these methods was done through the R programming language. After this, a stage of training and improvement of the network is included, it was evaluated against the results of the classic forecasting methods, and the next 12 months were predicted by means of the best obtained model. Finally, the feasibility of the use of neural networks in the forecast of demand for purified water bottles, is demonstrated.}, bibtype = {inbook}, author = {Herrera-Granda, Israel D. and Chicaiza-Ipiales, Joselyn A. and Herrera-Granda, Erick P. and Lorente-Leyva, Leandro L. and Caraguay-Procel, Jorge A. and García-Santillán, Iván D. and Peluffo-Ordóñez, Diego H.}, doi = {10.1007/978-3-030-20518-8_31}, chapter = {Artificial Neural Networks for Bottled Water Demand Forecasting: A Small Business Case Study}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inbook{Rosero-Montalvo2019, type = {inbook}, year = {2019}, keywords = {Decision making,Embedded systems,Internet of things,Machine learning}, pages = {874--883}, websites = {http://link.springer.com/10.1007/978-3-030-02686-8_65}, id = {a42a96bb-7a40-36bb-a21e-87e7b5fe3209}, created = {2020-12-29T22:52:14.353Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:34.220Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Rosero-Montalvo2019}, private_publication = {false}, abstract = {The use of electronic systems and devices has become widely spread and is reaching several fields as well as indispensable for many daily activities. Such systems and devices (here termed embedded systems) are aiming at improving human beings’ quality of life. To do so, they typically acquire users’ data to adjust themselves to different needs and environments in an adequate fashion. Consequently, they are connected to data networks to share this information and find elements that allow them to make the appropriate decisions. Then, for practical usage, their computational capabilities should be optimized to avoid issues such as: resources saturation (mainly memory and battery). In this line, machine learning offers a wide range of techniques and tools to incorporate “intelligence” into embedded systems, enabling them to make decisions by themselves. This paper reviews different data storage techniques along with machine learning algorithms for embedded systems. Its main focus is on techniques and applications (with special interest in Internet of Things) reported in literature about data analysis criteria to make decisions.}, bibtype = {inbook}, author = {Rosero-Montalvo, Paul D. and Batista, Vivian F. López and Rosero, Edwin A. and Jaramillo, Edgar D. and Caraguay, Jorge A. and Pijal-Rojas, José and Peluffo-Ordóñez, D. H.}, doi = {10.1007/978-3-030-02686-8_65}, chapter = {Intelligence in Embedded Systems: Overview and Applications}, title = {Advances in Intelligent Systems and Computing} }
@article{Lorente-Leyva2019a, title = {Artificial Neural Networks for Urban Water Demand Forecasting: A Case Study}, type = {article}, year = {2019}, pages = {012004}, volume = {1284}, websites = {https://iopscience.iop.org/article/10.1088/1742-6596/1284/1/012004}, month = aug, id = {09fdc602-9310-39f3-a0f4-0814561ed2a5}, created = {2020-12-29T22:52:14.499Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:34.506Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Lorente-Leyva2019a}, private_publication = {false}, abstract = {This paper presents an application of an artificial neural network model in forecasting urban water demand using MATLAB software. Considering that in any planning process, the demand forecast plays a fundamental role, being one of the premises to organize and control a set of activities or processes. The versatility of the short, medium and long-term prediction that is provided to the company that offers the water distribution service to determine the supply capacity, maintenance activities, and system improvements as a strategic planning tool. Shown to improve network performance by using time series water demand data, the model can provide excellent fit and forecast without relying on the explicit inclusion of climatic factors and number of consumers. The excellent accuracy of the model indicates the effectiveness of forecasting over different time horizons. Finally, the results obtained from the Artificial Neural Network are compared with traditional statistical models.}, bibtype = {article}, author = {Lorente-Leyva, Leandro L. and Pavón-Valencia, Jairo F. and Montero-Santos, Yakcleem and Herrera-Granda, Israel D. and Herrera-Granda, Erick P. and Peluffo-Ordóñez, Diego H.}, doi = {10.1088/1742-6596/1284/1/012004}, journal = {Journal of Physics: Conference Series} }
@inbook{Herrera-Granda2019b, type = {inbook}, year = {2019}, keywords = {Collection network,Facility Location Problem,NP-hard,Optimization,Reverse logistics,Urban solid waste}, pages = {578--589}, websites = {http://link.springer.com/10.1007/978-3-030-05532-5_44}, id = {95d9c213-1397-3778-8c3b-3ad38f6d4c0f}, created = {2020-12-29T22:52:16.141Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:32.983Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Herrera-Granda2019b}, private_publication = {false}, abstract = {This paper presents the results of the optimization of the urban solid waste container network in the urban sector of the Ibarra City, Ecuador by the implementation of an optimization model, which consists of a multi-objective mixed integer programming model which has been successfully used in the context of recycling in past studies. This model was modified so that possible locations of the containers at each corner of the blocks containing the constructed buildings were considered. As well, a restriction to count the containers to be installed was added. Furthermore, to add robustness to the model, it was also considered the filling of the container based on the density of the deposited waste and the model objective functions – being, a weighted sum of the cost of the installation of the network along with the average walking distance between users and the assigned containers. The outputs of the model are the total number of containers and a map with the optimal locations of municipal solid waste containers for Ibarra city. The model was implemented in GAMS platform wherein parameters can be permanently revised so that the results may be updated in case of variations of the initial conditions.}, bibtype = {inbook}, author = {Herrera-Granda, Israel D. and Imbaquingo-Usiña, Wilson G. and Lorente-Leyva, Leandro L. and Herrera-Granda, Erick P. and Peluffo-Ordóñez, Diego H. and Rossit, Diego G.}, doi = {10.1007/978-3-030-05532-5_44}, chapter = {Optimization of the Network of Urban Solid Waste Containers: A Case Study}, title = {Communications in Computer and Information Science} }
@inbook{Ona-Rocha2019, type = {inbook}, year = {2019}, keywords = {Kernels,Motion tracking,Spectral clustering}, pages = {30--40}, websites = {http://link.springer.com/10.1007/978-3-030-36636-0_3}, id = {d6bcc2ee-3fe7-34fe-bec6-f89110405a59}, created = {2020-12-29T22:52:16.382Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:33.332Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Ona-Rocha2019}, private_publication = {false}, abstract = {Time-varying data characterization and classification is a field of great interest in both scientific and technology communities. There exists a wide range of applications and challenging open issues such as: automatic motion segmentation, moving-object tracking, and movement forecasting, among others. In this paper, we study the use of the so-called kernel spectral clustering (KSC) approach to capture the dynamic behavior of frames - representing rotating objects - by means of kernel functions and feature relevance values. On the basis of previous research works, we formally derive a here-called tracking vector able to unveil sequential behavior patterns. As a remarkable outcome, we alternatively introduce an encoded version of the tracking vector by converting into decimal numbers the resulting clustering indicators. To evaluate our approach, we test the studied KSC-based tracking over a rotating object from the COIL 20 database. Preliminary results produce clear evidence about the relationship between the clustering indicators and the starting/ending time instance of a specific dynamic sequence.}, bibtype = {inbook}, author = {Oña-Rocha, O. and Riascos-Salas, J. A. and Marrufo-Rodríguez, I. C. and Páez-Jaime, M. A. and Mayorca-Torres, D. and Ponce-Guevara, K. L. and Salazar-Castro, J. A. and Peluffo-Ordóñez, D. H.}, doi = {10.1007/978-3-030-36636-0_3}, chapter = {Kernel-Spectral-Clustering-Driven Motion Segmentation: Rotating-Objects First Trials}, title = {Communications in Computer and Information Science} }
@article{Mayorca-Torres2019a, title = {Multi-target tracking for sperm motility measurement using the {Kalman} filter and {JPDAF}: Preliminary results}, type = {article}, year = {2019}, keywords = {JPDAF,Kalman filter,Morphology,Motility,Spermatozoa}, websites = {https://search.proquest.com/openview/69fcef4b61d6ec863099124a9c2fe66f}, id = {7d2ef08d-cd23-3051-8646-2b09a136bfdc}, created = {2020-12-29T22:52:16.393Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:33.117Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Mayorca-Torres2019a}, private_publication = {false}, abstract = {The determination of sperm motility characteristics is of great importance for the specification of fertility in men. The semengram is the main diagnostic test to confirm semen quality. Currently, many fertility laboratories use visual assistance techniques to evaluate by using the Makler counting chamber, where motility and sperm count analysis can be performed. This research project proposes a method that allows the quantification of motility through the use of the probabilistic filter (JPDAF) based on the Kalman filter. This research requires the stages of segmentation, feature extraction and development of tracking algorithms for the association of sperm trajectories when there are multiple objectives. A total of 200 individual sperm were selected and the effectiveness for sperm classification was determined according to the mobility categories established by the WHO, obtaining an average value of 93.5% for the categories (A, B, C and D).}, bibtype = {article}, author = {Mayorca-Torres, Dagoberto and Guerrero-Chapal, H. and Mejía-Manzano, Julio and Lopez-Mesa, Diana and Peluffo-Ordóñez, Diego H. and Salazar-Castro, José A.}, journal = {RISTI - Revista Iberica de Sistemas e Tecnologias de Informacao} }
@article{Fernandez2019, title = {Optimization-based algorithms applied in photovoltaic systems}, type = {article}, year = {2019}, keywords = {Algorithms,Dynamic programming,Photovoltaic energy}, websites = {https://search.proquest.com/openview/33e52f4b710e1368bead8eda6346684a}, id = {7ce4072e-1c11-326e-83d6-6f997d68a76e}, created = {2020-12-29T22:52:16.930Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:32.627Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Fernandez2019}, private_publication = {false}, abstract = {Network codes have been developed to facilitate the integration of power plants into the electrical system; a specific case is that concerning the performance of large scale photovoltaic power plants, which represents a drawback for operators of transmission systems due to the way in which solar irradiance varies during the day. The present study is focused on a group of models that are based on dynamic programming with recursive algorithms. Broadly, such algorithms may significantly benefit to the decision making in the field of the optimization of the photovoltaic systems. The main contribution of this work lies on the compilation of a group of models whose recursive algorithms allow evaluating each stage of a given process and related to a group of parameters that can be controlled within a dynamic model whose nuances vary depending on other submodels associated with some families of classic models of linear and nonlinear programming.}, bibtype = {article}, author = {Fernández, Yasmany Fernández and Tobar, Ana Cabrera and Peluffo-Ordóñez, Diego H. and Manosalvas, Teresa Sánchez and Miranda, Ridelio}, journal = {RISTI - Revista Iberica de Sistemas e Tecnologias de Informacao} }
@incollection{Gomez-Lara2019,
  author    = {Gómez-Lara, J. F. and Ordóñez-Bolaños, O. A. and Becerra, M. A. and Castro-Ospina, A. E. and Mejía-Arboleda, C. and Duque-Mejía, C. and Rodriguez, J. and Revelo-Fuelagán, Javier and Peluffo-Ordóñez, Diego H.},
  title     = {Feature Extraction Analysis for Emotion Recognition from {ICEEMD} of Multimodal Physiological Signals},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  year      = {2019},
  pages     = {351--362},
  doi       = {10.1007/978-3-030-14799-0_30},
  url       = {http://link.springer.com/10.1007/978-3-030-14799-0_30},
  keywords  = {Emotion recognition, Improved complementary ensemble empirical mode decomposition, Multimodal, Physiological signals, Signal processing},
  abstract  = {The emotions identification is a very complex task due to depending on multiple variables individually and as a group. They are evaluated by different criteria such as arousal, valence, and dominance mainly. Several investigations have been focused on building prediction systems. Nevertheless, this is still an open research field. The main objective of this paper is the analysis of the Improved Complementary Ensemble Empirical Mode Decomposition (ICEEMD) for feature extraction from physiological signals for emotions prediction. Physiological signals and metadata of the DEAP database were used. First, the signals were preprocessed, then three decompositions were carried out using ICEEMD, Discrete Wavelet Transform (DWT), and Maximal overlap DWT. Feature extraction was carried out using Hermite coefficients, and multiple statistic measures from IMFs, coefficients DWT, and MODWT, and signals. Then, Relief F selection algorithms were applied to reducing the dimensionality of the feature space. Finally, Linear Discriminant Classifier (LDC) and K-NN cascade, and Random Forest classifiers were tested. The different decomposition techniques were compared, and the relevant signals and measures were established. The results demonstrated the capability of ICEEMD decomposition for emotions analysis from physiological signals.},
}
@article{L.Lorente-Leyva2019,
  author   = {Lorente-Leyva, Leandro L. and Patino-Alarcon, Delio R. and Montero-Santos, Yakcleem and Herrera-Granda, Israel D. and Peluffo-Ordonez, Diego H. and Lastre-Aleaga, Arlys M. and Cordoves-Garcia, Alexis},
  title    = {Artificial Neural Networks in the Demand Forecasting of a Metal-Mechanical Industry},
  journal  = {Journal of Engineering and Applied Sciences},
  year     = {2019},
  month    = oct,
  volume   = {15},
  number   = {1},
  pages    = {81--87},
  doi      = {10.36478/jeasci.2020.81.87},
  url      = {http://www.medwelljournals.com/abstract/?doi=jeasci.2020.81.87},
  abstract = {This research presents an application of artificial neural networks in demand forecasting by using MATLAB Software. Keeping in mind that in any planning process forecasts play a fundamental role, being one of the bases for; planning, organizing and controlling production. It gives priority to the most critical nodes and their key activities, so that, the decisions made about them will generate the greatest possible positive impact. The methodology applied demonstrates the quality of the solutions found which are compared with traditional statistical methods to demonstrate the value of the solution proposed. When the results show that the minimum quadratic error is reached with the application of artificial neural networks, a better performance is obtained. Therefore, a suitable horizon is established for the planification and decision making in the metal-mechanical industry for the use of artificial intelligence in the production processes.},
}
@incollection{Becerra2019,
  author    = {Becerra, Miguel Alberto and Londoño-Delgado, Edwin and Botero-Henao, Oscar I. and Marín-Castrillón, Diana and Mejia-Arboleda, Cristian and Peluffo-Ordóñez, Diego Hernán},
  title     = {Low Resolution Electroencephalographic-Signals-Driven Semantic Retrieval: Preliminary Results},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  year      = {2019},
  volume    = {11466 LNBI},
  pages     = {333--342},
  publisher = {Springer Verlag},
  doi       = {10.1007/978-3-030-17935-9_30},
  url       = {http://link.springer.com/10.1007/978-3-030-17935-9_30},
  keywords  = {Electroencephalographic signal, Machine learning, Semantic category, Semantic retrieval, Signal processing},
  abstract  = {Nowadays, there exist high interest in the brain-computer interface (BCI) systems, and there are multiple approaches to developing them. Lexico-semantic (LS) classification from Electroencephalographic (EEG) signals is one of them, which is an open and few explored research field. The LS depends on the creation of the concepts of each person and its context. Therefore, it has not been demonstrated a universal fingerprint of the LS either the spatial location in the brain, which depends on the variability the brain plasticity and other changes throughout the time. In this study, an analysis of LS from EEG signals was carried out. The Emotiv Epoc+ was used for the EEG acquisition from three participants reading 36 different words. The subjects were characterized throughout two surveys (Becks depression, and emotion test) for establishing their emotional state, depression, and anxiety levels. The signals were processed to demonstrate semantic category and for decoding individual words (4 pairs of words were selected for this study). The methodology was executed as follows: first, the signals were pre-processed, decomposed by sub-bands (δ, θ, α, β, and γ) and standardized. Then, feature extraction was applied using linear and non-linear statistical measures, and the Discrete Wavelet Transform calculated from EEG signals, generating the feature space termed set-1. Also, the principal component analysis was applied to reduce the dimensionality, generating the feature space termed set-2. Finally, both sets were tested independently by multiple classifiers based on the support vector machine and k-nearest neighbor. These were validated using 10-fold cross-validation achieving results upper to 95\% of accuracy which demonstrated the capability of the proposed mechanism for decoding LS from a reduced number of EEG signals acquired using a portable system of acquisition.},
}
@incollection{Vega-Gualan2019,
  author    = {Vega-Gualán, Emil and Vargas, Andrés and Becerra, Miguel and Umaquinga, Ana and Riascos, Jaime A. and Peluffo, Diego},
  title     = {Exploring the Characterization and Classification of {EEG} Signals for a Computer-Aided Epilepsy Diagnosis System},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  year      = {2019},
  volume    = {11976 LNAI},
  pages     = {189--198},
  doi       = {10.1007/978-3-030-37078-7_19},
  url       = {http://link.springer.com/10.1007/978-3-030-37078-7_19},
  keywords  = {Electroencephalogram (EEG), Epilepsy diagnosis, K-Nearest Neighbors (KNN), Linear Discriminant Analysis (LDA), Quadratic Discriminant Analysis (QDA), Support Vector Machine (SVM)},
  abstract  = {Epilepsy occurs when localized electrical activity of neurons suffer from an imbalance. One of the most adequate methods for diagnosing and monitoring is via the analysis of electroencephalographic (EEG) signals. Despite there is a wide range of alternatives to characterize and classify EEG signals for epilepsy analysis purposes, many key aspects related to accuracy and physiological interpretation are still considered as open issues. In this paper, this work performs an exploratory study in order to identify the most adequate frequently-used methods for characterizing and classifying epileptic seizures. In this regard, a comparative study is carried out on several subsets of features using four representative classifiers: Linear Discriminant Analysis (LDA), Quadratic Discriminant Analysis (QDA), K-Nearest Neighbor (KNN), and Support Vector Machine (SVM). The framework uses a well-known epilepsy dataset and runs several experiments for two and three classification problems. The results suggest that DWT decomposition with SVM is the most suitable combination.},
}
@inproceedings{Duque-Mejia2019,
  author    = {Duque-Mejía, C. and Becerra, M. A. and Zapata-Hernández, C. and Mejia-Arboleda, C. and Castro-Ospina, A. E. and Delgado-Trejos, E. and Peluffo-Ordóñez, Diego H. and Rosero-Montalvo, P. and Revelo-Fuelagán, Javier},
  title     = {Cardiac Murmur Effects on Automatic Segmentation of {ECG} Signals for Biometric Identification: Preliminary Study},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  year      = {2019},
  pages     = {269--279},
  doi       = {10.1007/978-3-030-14799-0_23},
  url       = {https://link.springer.com/chapter/10.1007/978-3-030-14799-0_23},
  keywords  = {Automatic segmentation, Biometric, Electrocardiographic signal, Heart murmur, Pattern recognition},
  abstract  = {Biometric identification or authentication is a pattern recognition process, which is carried out acquiring different measures of human beings to distinguish them. Fingerprint and eye iris are the most known and used biometric techniques; nevertheless, also they are the most vulnerable to counterfeiting. Consequently, nowadays research has been focused on physiological signals and behavioral traits for biometric identification because these allow not only the authentication but also determine that the subject is alive. Electrocardiographic signals (ECG-S) have been studied for biometric identification demonstrating their capability. Taking into account that some pathologies are detected using ECG-S, these can affect the results of biometric identification; nonetheless, some diseases such as cardiac murmurs are not detected by ECG-S, but they can distort their morphology. Therefore, these signals must be analyzed considering different pathologies. In this paper, a biometric study was carried out from 40 subjects (20 with cardiac murmurs and 20 without cardiac affections). First, the ECG-S were preprocessed and segmented using the fast method for detecting T waves with annotation of P and T waves, then feature extraction was carried out using discrete wavelet transform (DWT), maximal overlap DWT, cepstral coefficients, and statistical measures. Then, rough set and relief F algorithms were applied to datasets (pathological and normal signals) for attribute reduction. Finally, multiple classifiers and combinations of them were tested. The results of the segmentation were analyzed achieving low results for signals affected by cardiac murmurs. On the other hand, according to the cardiac murmur effects analyzed, the performance of the classifiers in cascade shown the best accuracy for human identification from ECG-S, minimizing the impact of variability generated on ECG-S by cardiac murmurs diseases.},
}
@inproceedings{Londono-Delgado2019,
  author    = {Londoño-Delgado, Edwin and Becerra, Miguel Alberto and Duque-Mejía, Carolina M. and Zapata, Juan Camilo and Mejía-Arboleda, Cristian and Castro-Ospina, Andrés Eduardo and Peluffo-Ordóñez, Diego Hernán},
  title     = {Non-Generalized Analysis of the Multimodal Signals for Emotion Recognition: Preliminary Results},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  year      = {2019},
  volume    = {11466 LNBI},
  pages     = {363--373},
  publisher = {Springer Verlag},
  doi       = {10.1007/978-3-030-17935-9_33},
  url       = {https://link.springer.com/chapter/10.1007/978-3-030-17935-9_33},
  keywords  = {Emotion recognition, Physiological signals, Signal processing},
  abstract  = {Emotions are mental states associated with some stimuli, and they have a relevant impact on the people living and are correlated with their physical and mental health. Different studies have been carried out focused on emotion identification considering that there is a universal fingerprint of the emotions. However, this is an open field yet, and some authors had refused such proposal which is contrasted with many results which can be considered as no conclusive despite some of them have achieved high results of performances for identifying some emotions. In this work an analysis of identification of emotions per individual based on physiological signals using the known MAHNOB-HCI-TAGGING database is carried out, considering that there is not a universal fingerprint based on the results achieved by a previous meta-analytic investigation of emotion categories. The methodology applied is depicted as follows: first the signals were filtered and normalized and decomposed in five bands (δ, θ, α, β, γ), then a features extraction stage was carried out using multiple statistical measures calculated of results achieved after applied discrete wavelet transform, Cepstral coefficients, among others. A feature space dimensional reduction was applied using the selection algorithm relief F. Finally, the classification was carried out using support vector machine, and k-nearest neighbors and its performance analysis was measured using 10 folds cross-validation achieving high performance up to 99\%.},
}
@incollection{Areiza-Laverde2018,
  author    = {Areiza-Laverde, Henry Jhoán and Castro-Ospina, Andrés Eduardo and Peluffo-Ordóñez, Diego Hernán},
  title     = {Voice Pathology Detection Using Artificial Neural Networks and Support Vector Machines Powered by a Multicriteria Optimization Algorithm},
  booktitle = {Communications in Computer and Information Science},
  year      = {2018},
  pages     = {148--159},
  doi       = {10.1007/978-3-030-00350-0_13},
  url       = {http://link.springer.com/10.1007/978-3-030-00350-0_13},
  keywords  = {Classification, Computer-aided diagnosis, Optimization, Voice pathology},
  abstract  = {Computer-aided diagnosis (CAD) systems have allowed to enhance the performance of conventional, medical diagnosis procedures in different scenarios. Particularly, in the context of voice pathology detection, the use of machine learning algorithms has proved to be a promising and suitable alternative. This work proposes the implementation of two well known classification algorithms, namely artificial neural networks (ANN) and support vector machines (SVM), optimized by particle swarm optimization (PSO) algorithm, aimed at classifying voice signals between healthy and pathologic ones. Three different configurations of the Saarbrucken voice database (SVD) are used. The effect of using balanced and unbalanced versions of this dataset is proved as well as the usefulness of the considered optimization algorithm to improve the final performance outcomes. Also, proposed approach is comparable with state-of-the-art methods.},
}
@inproceedings{Rosero-Montalvo2018b,
  author    = {Rosero-Montalvo, Paul D. and Pijal-Rojas, Jose and Vasquez-Ayala, Carlos and Maya, Edgar and Pupiales, Carlos and Suarez, Luis and Benitez-Pereira, Henry and Peluffo-Ordonez, D. H.},
  title     = {Wireless Sensor Networks for Irrigation in Crops Using Multivariate Regression Models},
  booktitle = {2018 IEEE Third Ecuador Technical Chapters Meeting (ETCM)},
  year      = {2018},
  month     = oct,
  pages     = {1--6},
  publisher = {IEEE},
  doi       = {10.1109/ETCM.2018.8580322},
  url       = {https://ieeexplore.ieee.org/document/8580322/},
  keywords  = {WSN, crops analysis, regression model},
  abstract  = {The present wireless sensor network system shows a data analysis approach within greenhouses in short cycle crops. This research, on the one hand, is carried out to reduce water consumption and improve the product by predicting the right moment of the irrigation cycle through the evapotranspiration criterion. On the other hand, an efficient electronic system is designed under the electronic standard. To define the best model to define the next irrigation in the crops in base to ground humidity, the algorithms are compared for continuous and discontinuous multivariate regressions. The results are evaluated with different criteria of prediction errors. As a result, the linear regression with Support Vector Machine model is chosen for counting an average deviation error of 7.89\% and an error variability of 4.48\%. In addition, water consumption is reduced by 20\%, achieving better quality products.},
}
@incollection{Basante-Villota2018,
  author    = {Basante-Villota, C. K. and Ortega-Castillo, C. M. and Peña-Unigarro, D. F. and Revelo-Fuelagán, J. E. and Salazar-Castro, J. A. and Peluffo-Ordóñez, D. H.},
  title     = {Comparative Analysis Between Embedded-Spaces-Based and Kernel-Based Approaches for Interactive Data Representation},
  booktitle = {Communications in Computer and Information Science},
  year      = {2018},
  pages     = {28--38},
  doi       = {10.1007/978-3-319-98998-3_3},
  url       = {http://link.springer.com/10.1007/978-3-319-98998-3_3},
  keywords  = {Artificial intelligence, CMDS, Dimensionality reduction methods, Kernel, Kernel PCA, LE, LLE},
  abstract  = {This work presents a comparative analysis between the linear combination of embedded spaces resulting from two approaches: (1) The application of dimensional reduction methods (DR) in their standard implementations, and (2) Their corresponding kernel-based approximations. Namely, considered DR methods are: CMDS (Classical Multi-Dimensional Scaling), LE (Laplacian Eigenmaps) and LLE (Locally Linear Embedding). This study aims at determining -through objective criteria- what approach obtains the best performance of DR task for data visualization. The experimental validation was performed using four databases from the UC Irvine Machine Learning Repository. The quality of the obtained embedded spaces is evaluated regarding the RNX(K) criterion. The RNX(K) allows for evaluating the area under the curve, which indicates the performance of the technique in a global or local topology. Additionally, we measure the computational cost for every comparing experiment. A main contribution of this work is the provided discussion on the selection of an interactivity model when mixturing DR methods, which is a crucial aspect for information visualization purposes.},
}
@inproceedings{Rosero-Montalvo2018a,
  author    = {Rosero-Montalvo, Paul D. and Godoy-Trujillo, Pamela and Flores-Bosmediano, Edison and Carrascal-Garcia, Jorge and Otero-Potosi, Santiago and Benitez-Pereira, Henry and Peluffo-Ordonez, Diego H.},
  title     = {Sign Language Recognition Based on Intelligent Glove Using Machine Learning Techniques},
  booktitle = {2018 IEEE Third Ecuador Technical Chapters Meeting (ETCM)},
  year      = {2018},
  month     = oct,
  pages     = {1--5},
  publisher = {IEEE},
  doi       = {10.1109/ETCM.2018.8580268},
  url       = {https://ieeexplore.ieee.org/document/8580268/},
  keywords  = {intelligent glove, knn, prototype selection, sign language},
  abstract  = {We present an intelligent electronic glove system able to detect numbers of sign language in order to automate the process of communication between a deaf-mute person and others. This is done by translating the hands move sign language into an oral language. The system is inside to a glove with flex sensors in each finger that we are used to collect data that are analyzed through a methodology involving the following stages: (i) Data balancing with the Kennard-Stone (KS), (ii) Comparison of prototypes selection between CHC evolutionary Algorithm and Decremental Reduction Optimization Procedure 3 (DROP3) to define the best one. Subsequently, the K-Nearest Neighbors (kNN) as classifier (iii) is implemented. As a result, the amount of data reduced from stage (i) from storage within the system is 98\%. Also, a classification performance of 85\% is achieved with CHC evolutionary algorithm.},
}
@article{Armas2018,
  author   = {Flores Armas, Stefany and Rosero Montalvo, Paul and Maya Olalla, Edgar and Peluffo Ordoñez, Diego},
  title    = {Mejoramiento de las Capacidades Tecnológicas en los Sectores Rurales de la Provincia de {Imbabura}},
  journal  = {Ciencia},
  year     = {2018},
  url      = {https://ia601608.us.archive.org/0/items/Articulo11_201705/Art%C3%ADculo%2011.pdf},
  abstract = {La Universidad Técnica del Norte a través de la carrera de ingeniería en Electrónica y Redes de Comunicación, fortaleciendo al proceso de vinculación con la sociedad, crea un proyecto que beneficia a la población de las zonas rurales de la provincia de Imbabura, y que, particularmente, tiene el objetivo de disminuir la brecha digital en colegios, brindado acceso a las tecnologías de información y comunicación. Para poner en acción el proyecto de mejoramiento de las capacidades tecnológicas, se consideró como población de interés a los individuos mayores de 12 años, hombres y mujeres, de diferentes niveles sociales, culturales y étnicos. Metodológicamente, se plantea una planificación de programas de capacitación en base a un diagnóstico de las necesidades, que posteriormente fueron evaluados con el propósito de reducir las deficiencias tecnológicas de las personas y así mejorar la calidad de vida con miras a reducir el analfabetismo digital. Dentro de la ejecución del proyecto, se toma como estrategia los Infocentros Comunitarios y propios laboratorios de computación de las instituciones educativas, los mismos que son punto de referencia para llegar a las comunidades, logrando capacitar en herramientas de ofimática básica, asistencias de redes de comunicación, Internet, mantenimiento de computadoras, y administración de páginas web. Posteriormente a la capacitación, claramente se pudo observar que, en los sectores de acción, satisficieron sus expectativas, adquirieron habilidades y destrezas de temas tecnológicos. En este artículo se presenta los aspectos metodológicos y resultados más importantes del proyecto.},
}
@incollection{Castro-Ospina2018,
  author    = {Castro-Ospina, A. E. and Correa-Mira, A. M. and Herrera-Granda, I. D. and Peluffo-Ordóñez, D. H. and Fandiño-Toro, H. A.},
  title     = {Fingertips Segmentation of Thermal Images and Its Potential Use in Hand Thermoregulation Analysis},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  year      = {2018},
  pages     = {455--463},
  doi       = {10.1007/978-3-319-92639-1_38},
  url       = {http://link.springer.com/10.1007/978-3-319-92639-1_38},
  keywords  = {Fingertip segmentation, NPR measurement, Thermal hand images, Thermorregulation},
  abstract  = {Thermoregulation refers to the physiological processes that maintain stable the body temperatures. Infrared thermography is a non-invasive technique useful for visualizing these temperatures. Previous works suggest it is important to analyze thermoregulation in peripheral regions, such as the fingertips, because some disabling pathologies affect particularly the thermoregulation of these regions. This work proposes an algorithm for fingertip segmentation in thermal images of the hand. By using a supervised index, the results are compared against segmentations provided by humans. The results are outstanding even when the analyzed images are highly resized.},
}
@incollection{Lorente-Leyva2018,
  author    = {Lorente-Leyva, Leandro Leonardo and Herrera-Granda, Israel David and Rosero-Montalvo, Paul D. and Ponce-Guevara, Karina L. and Castro-Ospina, Andrés Eduardo and Becerra, Miguel A. and Peluffo-Ordóñez, Diego Hernán and Rodríguez-Sotelo, José Luis},
  title     = {Developments on Solutions of the Normalized-Cut-Clustering Problem Without Eigenvectors},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  year      = {2018},
  pages     = {318--328},
  doi       = {10.1007/978-3-319-92537-0_37},
  url       = {http://link.springer.com/10.1007/978-3-319-92537-0_37},
  keywords  = {Eigenvectors, Graph-based clustering, Normalized cut clustering, Quadratic forms},
  abstract  = {Normalized-cut clustering (NCC) is a benchmark graph-based approach for unsupervised data analysis. Since its traditional formulation is a quadratic form subject to orthogonality conditions, it is often solved within an eigenvector-based framework. Nonetheless, in some cases the calculation of eigenvectors is prohibitive or unfeasible due to the involved computational cost – for instance, when dealing with high dimensional data. In this work, we present an overview of recent developments on approaches to solve the NCC problem with no requiring the calculation of eigenvectors. Particularly, heuristic-search and quadratic-formulation-based approaches are studied. Such approaches are elegantly deduced and explained, as well as simple ways to implement them are provided.},
}
@article{SierraMartinez2018,
  author   = {Sierra Martínez, Luz Marina and Cobos, Carlos Alberto and Corrales Muñoz, Juan Carlos and Rojas Curieux, Tulio and Herrera-Viedma, Enrique and Peluffo-Ordóñez, Diego Hernán},
  title    = {Building a {Nasa Yuwe} Language Corpus and Tagging with a Metaheuristic Approach},
  journal  = {Computación y Sistemas},
  year     = {2018},
  month    = sep,
  volume   = {22},
  number   = {3},
  doi      = {10.13053/cys-22-3-3018},
  url      = {http://www.cys.cic.ipn.mx/ojs/index.php/CyS/article/view/3018},
  keywords = {Global-best harmony search, Harmony search, Hill climbing, Nasa Yuwe language, Part of speech tagger, Tabu memory, Tagged corpus},
  abstract = {Nasa Yuwe is the language of the Nasa indigenous community in Colombia. It is currently threatened with extinction. In this regard, a range of computer science solutions have been developed to the teaching and revitalization of the language. One of the most suitable approaches is the construction of a Part-Of-Speech Tagging (POST), which encourages the analysis and advanced processing of the language. Nevertheless, for Nasa Yuwe no tagged corpus exists, neither is there a POS Tagger and no related works have been reported. This paper therefore concentrates on building a linguistic corpus tagged for the Nasa Yuwe language and generating the first tagging application for Nasa Yuwe. The main results and findings are 1) the process of building the Nasa Yuwe corpus, 2) the tagsets and tagged sentences, as well as the statistics associated with the corpus, 3) results of two experiments to evaluate several POS Taggers (a Random tagger, three versions of HSTAGger, a tagger based on the harmony search metaheuristic, and three versions of a memetic algorithm GBHS Tagger, based on Global-Best Harmony Search (GBHS), Hill Climbing and an explicit Tabu memory, which obtained the best results in contrast with the other methods considered over the Nasa Yuwe language corpus.},
}
@incollection{Uribe2018,
  author    = {Uribe, Y. F. and Alvarez-Uribe, K. C. and Peluffo-Ordoñez, D. H. and Becerra, M. A.},
  title     = {Physiological Signals Fusion Oriented to Diagnosis - A Review},
  booktitle = {Communications in Computer and Information Science},
  year      = {2018},
  pages     = {1--15},
  doi       = {10.1007/978-3-319-98998-3_1},
  url       = {http://link.springer.com/10.1007/978-3-319-98998-3_1},
  keywords  = {Data fusion, Diagnostic decision support, Multimodal fusion, Physiological signal, Signal processing},
  abstract  = {The analysis of physiological signals is widely used for the development of diagnosis support tools in medicine, and it is currently an open research field. The use of multiple signals or physiological measures as a whole has been carried out using data fusion techniques commonly known as multimodal fusion, which has demonstrated its ability to improve the accuracy of diagnostic care systems. This paper presents a review of state of the art, putting in relief the main techniques, challenges, gaps, advantages, disadvantages, and practical considerations of data fusion applied to the analysis of physiological signals oriented to diagnosis decision support. Also, physiological signals data fusion architecture oriented to diagnosis is proposed.},
}
@inbook{Lasso-Arciniegas2018,
  type = {inbook},
  year = {2018},
  keywords = {ANN,EMG signals,Feature extraction,KNN,Parzen},
  pages = {368--375},
  websites = {http://link.springer.com/10.1007/978-3-030-01132-1_42},
  id = {37491735-6fdf-3f01-9815-985c68820597},
  created = {2020-12-29T22:52:05.141Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:36.254Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Lasso-Arciniegas2018},
  private_publication = {false},
  abstract = {The analysis of electromyographic (EMG) signals enables the development of important technologies for industry and medical environments, due mainly to the design of EMG-based human-computer interfaces. There exists a wide range of applications encompassing: Wireless-computer controlling, rehabilitation, wheelchair guiding, and among others. The semantic interpretation of EMG analysis is typically conducted by machine learning algorithms, and mainly involves stages for signal characterization and classification. This work presents a methodology for comparing a set of state-of-the-art approaches of EMG signal characterization and classification within a movement identification framework. We compare the performance of three classifiers (KNN, Parzen-density-based classifier and ANN) using spectral (Wavelets) and time-domain-based (statistical and morphological descriptors) features. Also, a methodology for movement selection is proposed. Results are comparable with those reported in literature, reaching classification performance of (90.89 ± 1.12)% (KNN), (93.92 ± 0.34)% (ANN) and 91.09 ± 0.93 (Parzen-density-based classifier) with 12 movements.},
  bibtype = {inbook},
  author = {Lasso-Arciniegas, Laura and Viveros-Melo, Andres and Salazar-Castro, José A. and Becerra, Miguel A. and Castro-Ospina, Andrés Eduardo and Revelo-Fuelagán, E. Javier and Peluffo-Ordóñez, Diego H.},
  doi = {10.1007/978-3-030-01132-1_42},
  chapter = {Movement Identification in EMG Signals Using Machine Learning: A Comparative Study},
  title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}
}
@inbook{Lopez-Chamorro2018,
  type = {inbook},
  year = {2018},
  pages = {96--106},
  websites = {http://link.springer.com/10.1007/978-3-319-78723-7_8},
  id = {d9562a17-a4ff-3b5d-9ebe-950764759d3c},
  created = {2020-12-29T22:52:12.205Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:34.125Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Lopez-Chamorro2018},
  private_publication = {false},
  abstract = {This paper proposes an approach for modeling cardiac pulses from electrocardiographic signals (ECG). A modified van der Pol oscillator model (mvP) is analyzed, which, under a proper configuration, is capable of describing action potentials, and, therefore, it can be adapted for modeling a normal cardiac pulse. Adequate parameters of the mvP system response are estimated using non-linear dynamics methods, like dynamic time warping (DTW). In order to represent an adaptive response for each individual heartbeat, a parameter tuning optimization method is applied which is based on a genetic algorithm that generates responses that morphologically resemble real ECG. This feature is particularly relevant since heartbeats have intrinsically strong variability in terms of both shape and length. Experiments are performed over real ECG from MIT-BIH arrhythmias database. The application of the optimization process shows that the mvP oscillator can be used properly to model the ideal cardiac rate pulse.},
  bibtype = {inbook},
  author = {Lopez-Chamorro, Fabián M. and Arciniegas-Mejia, Andrés F. and Imbajoa-Ruiz, David Esteban and Rosero-Montalvo, Paul D. and García, Pedro and Castro-Ospina, Andrés Eduardo and Acosta, Antonio and Peluffo-Ordóñez, Diego Hernán},
  doi = {10.1007/978-3-319-78723-7_8},
  chapter = {Cardiac Pulse Modeling Using a Modified van der Pol Oscillator and Genetic Algorithms},
  title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}
}
@inbook{Viveros-Melo2018,
  type = {inbook},
  year = {2018},
  keywords = {Classification,EMG signals,Movements selection,Wavelet},
  pages = {139--149},
  websites = {http://link.springer.com/10.1007/978-3-319-98998-3_11},
  id = {b5a9e55f-8737-3dd3-b1cf-6b117d533390},
  created = {2020-12-29T22:52:12.352Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:34.186Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Viveros-Melo2018},
  private_publication = {false},
  abstract = {Today, human-computer interfaces are increasingly more often used and become necessary for human daily activities. Among some remarkable applications, we find: Wireless-computer controlling through hand movement, wheelchair directing/guiding with finger motions, and rehabilitation. Such applications are possible from the analysis of electromyographic (EMG) signals. Despite some research works have addressed this issue, the movement classification through EMG signals is still an open challenging issue to the scientific community -especially, because the controller performance depends not only on classifier but other aspects, namely: used features, movements to be classified, the considered feature-selection methods, and collected data. In this work, we propose an exploratory work on the characterization and classification techniques to identifying movements through EMG signals. We compare the performance of three classifiers (KNN, Parzen-density-based classifier and ANN) using spectral (Wavelets) and time-domain-based (statistical and morphological descriptors) features. Also, a methodology for movement selection is proposed. Results are comparable with those reported in literature, reaching classification errors of 5.18% (KNN), 14.7407% (ANN) and 5.17% (Parzen-density-based classifier).},
  bibtype = {inbook},
  author = {Viveros-Melo, A. and Lasso-Arciniegas, L. and Salazar-Castro, J. A. and Peluffo-Ordóñez, D. H. and Becerra, M. A. and Castro-Ospina, A. E. and Revelo-Fuelagán, E. J.},
  doi = {10.1007/978-3-319-98998-3_11},
  chapter = {Exploration of Characterization and Classification Techniques for Movement Identification from EMG Signals: Preliminary Results},
  title = {Communications in Computer and Information Science}
}
@inbook{Becerra2018c,
  type = {inbook},
  year = {2018},
  keywords = {Brain-computer interface,Data fusion,Evaluation system,Information quality},
  pages = {289--300},
  websites = {http://link.springer.com/10.1007/978-3-319-78759-6_27},
  id = {2395c2b1-a5b1-3978-b8f0-cce08c5538b7},
  created = {2020-12-29T22:52:12.750Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:33.566Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Becerra2018c},
  private_publication = {false},
  abstract = {The evaluation of the data/information fusion systems does not have standard quality criteria making the reuse and optimization of these systems a complex task. In this work, we propose a complete low data fusion (DF) framework based on the Joint Director of Laboratories (JDL) model, which considers contextual information alongside information quality (IQ) and performance evaluation system to optimize the DF process according to the user requirements. A set of IQ criteria was proposed by level. The model was tested with a brain-computer interface (BCI) system multi-environment to prove its functionality. The first level makes the selection and preprocessing of electroencephalographic signals. In level one feature extraction is carried out using discrete wavelet transform (DWT), nonlinear and linear statistical measures, and Fuzzy Rough Set – FRS algorithm for selecting the relevant features; finally, in the same level a classification process was conducted using support vector machine – SVM. A Fuzzy Inference system is used for controlling different processes based on the results given by an IQ evaluation system, which applies quality measures that can be weighted by the users of the system according to their requirements. Besides, the system is optimized based on the results given by the cuckoo search algorithm, which uses the IQ traceability for maximizing the IQ criteria according to user requirements. The test was carried out with different type and levels of noise applied to the signals. The results showed the capability and functionality of the model.},
  bibtype = {inbook},
  author = {Becerra, Miguel Alberto and Alvarez-Uribe, Karla C. and Peluffo-Ordoñez, Diego Hernán},
  doi = {10.1007/978-3-319-78759-6_27},
  chapter = {Low Data Fusion Framework Oriented to Information Quality for BCI Systems},
  title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}
}
@inbook{BlancoValencia2018,
  type = {inbook},
  year = {2018},
  keywords = {Cascade classification,Case-based reasoning,Preprocessing,Probability},
  pages = {26--38},
  websites = {http://link.springer.com/10.1007/978-3-319-78723-7_3},
  id = {33c67c08-0eeb-3ff2-8b80-1f14a9756e97},
  created = {2020-12-29T22:52:12.828Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:33.710Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {BlancoValencia2018},
  private_publication = {false},
  abstract = {Case-Based Reasoning Systems (CBR) are in constant evolution, as a result, this article proposes improving the retrieve and adaption stages through a different approach. A series of experiments were made, divided in three sections: a proper pre-processing technique, a cascade classification, and a probability estimation procedure. Every stage offers an improvement, a better data representation, a more efficient classification, and a more precise probability estimation provided by a Support Vector Machine (SVM) estimator regarding more common approaches. Concluding, more complex techniques for classification and probability estimation are possible, improving CBR systems performance due to lower classification error in general cases.},
  bibtype = {inbook},
  author = {Blanco Valencia, X. and Bastidas Torres, D. and Piñeros Rodriguez, C. and Peluffo-Ordóñez, D. H. and Becerra, M. A. and Castro-Ospina, A. E.},
  doi = {10.1007/978-3-319-78723-7_3},
  chapter = {Case-Based Reasoning Systems for Medical Applications with Improved Adaptation and Recovery Stages},
  title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}
}
@inbook{Becerra2018b,
  type = {inbook},
  year = {2018},
  keywords = {Electroencephalographic signal,Sensorial stimulus,Signal processing,Tactile pleasantness},
  pages = {309--316},
  websites = {http://link.springer.com/10.1007/978-3-030-01132-1_35},
  id = {bd7e4365-4309-31a4-a800-2c9dadf63bbc},
  created = {2020-12-29T22:52:13.031Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:33.580Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Becerra2018b},
  private_publication = {false},
  abstract = {Haptic textures are alterations of any surface that are perceived and identified using the sense of touch, and such perception affects individuals. Therefore, it has high interest in different applications such as multimedia, medicine, marketing, systems based on human-computer interface among others. Some studies have been carried out using electroencephalographic signals; nevertheless, this can be considered few. Therefore this is an open research field. In this study, an analysis of tactile stimuli and emotion effects was performed from EEG signals to identify pleasantness and unpleasantness sensations using classifier systems. The EEG signals were acquired using Emotiv Epoc+ of 14 channels following a protocol for presenting ten different tactile stimuli two times. Besides, three surveys (Becks depression, emotion test, and tactile stimuli pleasant level) were applied to three volunteers for establishing their emotional state, depression, anxiety and the pleasantness level to characterize each subject. Then, the results of the surveys were computed and the signals preprocessed. Besides, the registers were labeled as pleasant and unpleasant. Feature extraction was applied from Short Time Fourier Transform and discrete wavelet transform calculated to each sub-bands (δ, θ, α, β, and γ) of EEG signals. Then, Rough Set algorithm was applied to identify the most relevant features. Also, this technique was employed to establish relations among stimuli and emotional states. Finally, five classifiers based on the support vector machine were tested using 10-fold cross-validation achieving results upper to 99% of accuracy. Also, dependences among emotions and pleasant and unpleasant tactile stimuli were identified.},
  bibtype = {inbook},
  author = {Becerra, Miguel A. and Londoño-Delgado, Edwin and Pelaez-Becerra, Sonia M. and Castro-Ospina, Andrés Eduardo and Mejia-Arboleda, Cristian and Durango, Julián and Peluffo-Ordóñez, Diego H.},
  doi = {10.1007/978-3-030-01132-1_35},
  chapter = {Electroencephalographic Signals and Emotional States for Tactile Pleasantness Classification},
  title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}
}
@inproceedings{Rosero-Montalvo2018,
  title = {Air Quality Monitoring Intelligent System Using Machine Learning Techniques},
  type = {inproceedings},
  year = {2018},
  keywords = {Air quality,Intelligent system,Monitoring system},
  pages = {75--80},
  websites = {https://ieeexplore.ieee.org/document/8564511/},
  month = nov,
  publisher = {IEEE},
  id = {805a29bd-6adc-3197-b975-2e7e0e2c562f},
  created = {2020-12-29T22:52:13.041Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:33.748Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Rosero-Montalvo2018},
  private_publication = {false},
  abstract = {Environment monitoring is so important because it is based on the first right of people, life and health. For this reason, this system monitoring air quality with different sensor nodes in the Ibarra that evaluate the parameters of CO2, NOx, UV Light, Temperature and Humidity. The data analysis through machine learning algorithms allow the system to classify autonomously if a certain geographical location is exceeding the established emission limits of gases. As a result, the k-Nearest Neighbor algorithm presented a great classification performance when selecting the most contaminated sectors.},
  bibtype = {inproceedings},
  author = {Rosero-Montalvo, Paul D. and Caraguay-Procel, Jorge A. and Jaramillo, Edgar D. and Michilena-Calderon, Jaime M. and Umaquinga-Criollo, Ana C. and Mediavilla-Valverde, Mario and Ruiz, Miguel A. and Beltran, Luis A. and Peluffo, Diego H.},
  doi = {10.1109/INCISCOS.2018.00019},
  booktitle = {2018 International Conference on Information Systems and Computer Science (INCISCOS)}
}
@inbook{Salazar-Castro2018,
  type = {inbook},
  year = {2018},
  keywords = {Dimensionality reduction,Generalized methodology,Kernel approximations,Low-computational cost,Multiple kernel learning,Spectral methods},
  pages = {661--669},
  websites = {http://link.springer.com/10.1007/978-3-030-03493-1_69},
  id = {7f5a4e77-19ba-3891-8980-01b62e7198a9},
  created = {2020-12-29T22:52:13.756Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:34.911Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Salazar-Castro2018},
  private_publication = {false},
  abstract = {Dimensionality reduction (DR) is a methodology used in many fields linked to data processing, and may represent a preprocessing stage or be an essential element for the representation and classification of data. The main objective of DR is to obtain a new representation of the original data in a space of smaller dimension, such that more refined information is produced, as well as the time of the subsequent processing is decreased and/or visual representations more intelligible for human beings are generated. The spectral DR methods involve the calculation of an eigenvalue and eigenvector decomposition, which is usually high-computational-cost demanding, and, therefore, the task of obtaining a more dynamic and interactive user-machine integration is difficult. Therefore, for the design of an interactive IV system based on DR spectral methods, it is necessary to propose a strategy to reduce the computational cost required in the calculation of eigenvectors and eigenvalues. For this purpose, it is proposed to use locally linear submatrices and spectral embedding. This allows integrating natural intelligence with computational intelligence for the representation of data interactively, dynamically and at low computational cost. Additionally, an interactive model is proposed that allows the user to dynamically visualize the data through a weighted mixture.},
  bibtype = {inbook},
  author = {Salazar-Castro, J. A. and Peña, D. F. and Basante, C. and Ortega, C. and Cruz-Cruz, L. and Revelo-Fuelagán, J. and Blanco-Valencia, X. P. and Castellanos-Domínguez, G. and Peluffo-Ordóñez, D. H.},
  doi = {10.1007/978-3-030-03493-1_69},
  chapter = {Generalized Low-Computational Cost Laplacian Eigenmaps},
  title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}
}
@inbook{Salazar-Castro2018a,
  type = {inbook},
  year = {2018},
  keywords = {Data visualization,Dimensionality reduction,Interactive interface,Pairwise similarity},
  pages = {557--567},
  websites = {http://link.springer.com/10.1007/978-3-319-92537-0_64},
  id = {809c6f3d-e207-3cc3-b58d-b72b0a9dbd33},
  created = {2020-12-29T22:52:14.018Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:34.883Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Salazar-Castro2018a},
  private_publication = {false},
  abstract = {Dimensionality reduction (DR) methods are able to produce low-dimensional representations of an input data sets which may become intelligible for human perception. Nonetheless, most existing DR approaches lack the ability to naturally provide the users with the faculty of controlability and interactivity. In this connection, data visualization (DataVis) results in an ideal complement. This work presents an integration of DR and DataVis through a new approach for data visualization based on a mixture of DR resultant representations while using visualization principle. Particularly, the mixture is done through a weighted sum, whose weighting factors are defined by the user through a novel interface. The interface’s concept relies on the combination of the color-based and geometrical perception in a circular framework so that the users may have a at hand several indicators (shape, color, surface size) to make a decision on a specific data representation. Besides, pairwise similarities are plotted as a non-weighted graph to include a graphic notion of the structure of input data. Therefore, the proposed visualization approach enables the user to interactively combine DR methods, while providing information about the structure of original data, making then the selection of a DR scheme more intuitive.},
  bibtype = {inbook},
  author = {Salazar-Castro, Jose Alejandro and Rosero-Montalvo, Paul D. and Peña-Unigarro, Diego Fernando and Umaquinga-Criollo, Ana Cristina and Castillo-Marrero, Zenaida and Revelo-Fuelagán, Edgardo Javier and Peluffo-Ordóñez, Diego Hernán and Castellanos-Domínguez, César Germán},
  doi = {10.1007/978-3-319-92537-0_64},
  chapter = {A Novel Color-Based Data Visualization Approach Using a Circular Interaction Model and Dimensionality Reduction},
  title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}
}
@inbook{Salazar-Castro2018b,
  type = {inbook},
  year = {2018},
  keywords = {Homotopy,Object deformation,Smooth transitions,Transcendental functions},
  pages = {231--242},
  websites = {http://link.springer.com/10.1007/978-3-319-78759-6_22},
  id = {9a51bf48-234f-3317-bc33-78711e79218b},
  created = {2020-12-29T22:52:14.180Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:34.261Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Salazar-Castro2018b},
  private_publication = {false},
  abstract = {This work explores novel alternatives to conventional linear homotopy to enhance the quality of resulting transitions from object deformation applications. Studied/introduced approaches extend the linear mapping to other representations that provides smooth transitions when deforming objects while homotopy conditions are fulfilled. Such homotopy approaches are based on transcendental functions (TFH) in both simple and parametric versions. As well, we propose a variant of an existing quality indicator based on the ratio between the coefficients curve of resultant homotopy and that of a less-realistic, reference homotopy. Experimental results depict the effect of proposed TFH approaches regarding its usability and benefit for interpolating images formed by homotopic objects with smooth changes.},
  bibtype = {inbook},
  author = {Salazar-Castro, Jose Alejandro and Umaquinga-Criollo, Ana Cristina and Cruz-Cruz, Lilian Dayana and Alpala-Alpala, Luis Omar and González-Castaño, Catalina and Becerra-Botero, Miguel A. and Peluffo-Ordóñez, Diego Hernán and Castellanos-Domínguez, Cesar Germán},
  doi = {10.1007/978-3-319-78759-6_22},
  chapter = {Advances in Homotopy Applied to Object Deformation},
  title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}
}
@inbook{Becerra2018,
  type = {inbook},
  year = {2018},
  keywords = {Biometric identification,Cardiac murmur,Electrocardiographic signal,Signal processing},
  pages = {410--418},
  websites = {http://link.springer.com/10.1007/978-3-030-03493-1_43},
  id = {d6dcd105-899e-3896-afb6-9538f96fefb6},
  created = {2020-12-29T22:52:14.507Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:34.458Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Becerra2018},
  private_publication = {false},
  abstract = {The process of distinguishing among human beings through the inspection of acquired data from physical or behavioral traits is known as biometric identification. Mostly, fingerprint- and iris-based biometric techniques are used. Nowadays, since such techniques are highly susceptible to be counterfeited, new biometric alternatives are explored mainly based on physiological signals and behavioral traits -which are useful not only for biometric identification purposes, but may also play a role as a vital signal indicator. In this connection, the electrocardiographic (ECG) signals have shown to be a suitable approach. Nonetheless, their informative components (morphology, rhythm, polarization, and among others) can be affected by the presence of a cardiac pathology. Even more, some other cardiac diseases cannot directly be detected by the ECG signal inspection but still have an effect on their waveform, that is the case of cardiac murmurs. Therefore, for biometric purposes, such signals should be analyzed submitted to the effects of pathologies. This paper presents an exploratory study aimed at assessing the influence of the presence of a pathology when analyzing ECG signals for implementing a biometric system. For experiments, a data base holding 20 healthy subjects and 20 pathological subjects (diagnosed with different types of cardiac murmurs) are considered. The proposed signal analysis consists of preprocessing, characterization (using wavelet features), feature selection and classification (five classifiers as well as a mixture of them are tested). As a result, through the performed comparison of the classification rates when testing pathological and normal ECG signals, the cardiac murmurs’ undesired effect on the identification mechanism performance is clearly unveiled.},
  bibtype = {inbook},
  author = {Becerra, M. A. and Duque-Mejía, C. and Zapata-Hernández, C. and Peluffo-Ordóñez, D. H. and Serna-Guarín, L. and Delgado-Trejos, Edilson and Revelo-Fuelagán, E. J. and Blanco Valencia, X. P.},
  doi = {10.1007/978-3-030-03493-1_43},
  chapter = {Exploratory Study of the Effects of Cardiac Murmurs on Electrocardiographic-Signal-Based Biometric Systems},
  title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}
}
@article{Alpala2018,
  title = {Methodology for the design and simulation of industrial facilities and production systems based on a modular approach in an ``industry 4.0'' context},
  type = {article},
  year = {2018},
  keywords = {Coffee processing plant,Industrial facilities,Industry 4.0,Layout,Production system,Simulation},
  pages = {243--252},
  volume = {85},
  websites = {https://revistas.unal.edu.co/index.php/dyna/article/view/68545},
  month = oct,
  day = {1},
  id = {9e0eb3d7-ba0d-3fd2-97dd-54c4f4d3f827},
  created = {2020-12-29T22:52:14.627Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:34.455Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Alpala2018},
  private_publication = {false},
  abstract = {The design of the industrial facilities distribution is one of the most important decisions to be made, as it will condition the operation thereof. The concept of industrial installation as it is known today has evolved to the point that it integrates automation and information systems. Indeed, such evolution has given rise to the so-called intelligent factory. At present, in order to produce customized mass products according to customers' requirements, it is become an important issue the distribution of facilities with the generation of successful layout designs, based on the flexibility, modularity and easy configuration of production systems. This paper proposes a methodology to solve the problem of plant distribution design and redesign based upon a novel modular approach within an industry 4.0 context. Proposed methodology is an adaptation of the "SLP" Methodology (Systematic Layout Planning-Simulation) so-called SLP Modulary 4.0 (systematic planning of the Layout based on a modular vision under a context of Industry 4.0); this methodology incorporates in its structure an integrated design system (IDS) into its structure, which allows collaborative work with different CAD design and simulation tools. For the validation of the proposed methodology, a case study of a coffee processing plant is considered. The distribution design results obtained from the case study prove the benefit and usefulness of the proposed methodology.},
  bibtype = {article},
  author = {Alpala, Luis Omar and Alemany, Maria del Mar Eva and Peluffo, Diego Hernán and Bolaños, Fabio Andres and Rosero, Aura Maria and Torres, Juan Carlos},
  doi = {10.15446/dyna.v85n207.68545},
  journal = {DYNA},
  number = {207}
}
@inbook{Garcia-Santillan2018,
  type = {inbook},
  year = {2018},
  keywords = {Autonomous guidance,Computer vision,Crop row detection,Image segmentation},
  pages = {355--366},
  websites = {http://link.springer.com/10.1007/978-3-319-73450-7_34},
  id = {0bb4408b-2be2-32fa-bebe-63381189ee1c},
  created = {2020-12-29T22:52:14.810Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:34.587Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Garcia-Santillan2018},
  private_publication = {false},
  abstract = {This work presents an adaptation and validation of a method for automatic crop row detection from images captured in potato fields (Solanum tuberosum) for initial growth stages based on the micro-ROI concept. The crop row detection is a crucial aspect for autonomous guidance of agricultural vehicles and site-specific treatments application. The images were obtained using a color camera installed in the front of a tractor under perspective projection. There are some issues that can affect the quality of the images and the detection procedure, among them: uncontrolled illumination in outdoor agricultural environments, different plant densities, presence of weeds and gaps in the crop rows. The adapted approach was designed to address these adverse situations and it consists of three linked phases. The main contribution is the ability to detect straight and curved crop rows in potato crops. The performance was quantitatively compared against two existing methods, achieving acceptable results in terms of accuracy and processing time.},
  bibtype = {inbook},
  author = {García-Santillán, Iván and Peluffo-Ordoñez, Diego and Caranqui, Víctor and Pusdá, Marco and Garrido, Fernando and Granda, Pedro},
  doi = {10.1007/978-3-319-73450-7_34},
  chapter = {Computer Vision-Based Method for Automatic Detection of Crop Rows in Potato Fields},
  title = {Advances in Intelligent Systems and Computing}
}
@inbook{Herrera-Granda2018,
  type = {inbook},
  year = {2018},
  keywords = {Clustering algorithms,Contraction hierarchies,Free software,K-means,Optimization,University transportation,Vehicle routing},
  pages = {95--107},
  websites = {http://link.springer.com/10.1007/978-3-319-92639-1_9},
  id = {2ad172fb-c4ca-3c21-94e0-0c22cf4237a5},
  created = {2020-12-29T22:52:14.819Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:34.578Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Herrera-Granda2018},
  private_publication = {false},
  abstract = {This research work focuses on the study of different models of solution reflected in the literature, which treat the optimization of the routing of vehicles by nodes and the optimal route for the university transport service. With the recent expansion of the facilities of a university institution, the allocation of the routes for the transport of its students, became more complex. As a result, geographic information systems (GIS) tools and operations research methodologies are applied, such as graph theory and vehicular routing problems, to facilitate mobilization and improve the students transport service, as well as optimizing the transfer time and utilization of the available transport units. An optimal route management procedure has been implemented to maximize the level of service of student transport using the K-means clustering algorithm and the method of node contraction hierarchies, with low cost due to the use of free software.},
  bibtype = {inbook},
  author = {Herrera-Granda, Israel D. and Lorente-Leyva, Leandro L. and Peluffo-Ordóñez, Diego H. and Valencia-Chapi, Robert M. and Montero-Santos, Yakcleem and Chicaiza-Vaca, Jorge L. and Castro-Ospina, Andrés E.},
  doi = {10.1007/978-3-319-92639-1_9},
  chapter = {Optimization of the University Transportation by Contraction Hierarchies Method and Clustering Algorithms},
  title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}
}
@inbook{ type = {inbook}, year = {2018}, keywords = {Electroencephalographic signal,Emotion,Odor pleasantness,Sensorial stimuli,Signal processing}, pages = {128-138}, websites = {http://link.springer.com/10.1007/978-3-319-98998-3_10}, id = {e9b1b3f3-3c48-376b-a61f-9349c9a30495}, created = {2020-12-29T22:52:16.345Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:33.242Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Becerra2018a}, private_publication = {false}, abstract = {Odor identification refers to the capability of the olfactory sense for discerning odors. The interest in this sense has grown over multiple fields and applications such as multimedia, virtual reality, marketing, among others. Therefore, objective identification of pleasant and unpleasant odors is an open research field. Some studies have been carried out based on electroencephalographic signals (EEG). Nevertheless, these can be considered insufficient due to the levels of accuracy achieved so far. The main objective of this study was to investigate the capability of the classifiers systems for identification pleasant and unpleasant odors from EEG signals. The methodology applied was carried out in three stages. First, an odor database was collected using the signals recorded with an Emotiv Epoc+ with 14 channels of electroencephalography (EEG) and using a survey for establishing the emotion levels based on valence and arousal considering that the odor induces emotions. The registers were acquired from three subjects, each was subjected to 10 different odor stimuli two times. The second stage was the feature extraction which was carried out on 5 sub-bands δ, θ, α, β, γ of EEG signals using discrete wavelet transform, statistical measures, and other measures such as area, energy, and entropy. Then, feature selection was applied based on Rough Set algorithms. 
Finally, in the third stage was applied a Support vector machine (SVM) classifier, which was tested with five different kernels. The performance of classifiers was compared using k-fold cross-validation. The best result of 99.9% was achieved using the linear kernel. The more relevant features were obtained from sub-bands β and α. Finally, relations among emotion, EEG, and odors were demonstrated.}, bibtype = {inbook}, author = {Becerra, M. A. and Londoño-Delgado, E. and Pelaez-Becerra, S. M. and Serna-Guarín, L. and Castro-Ospina, A. E. and Marin-Castrillón, D. and Peluffo-Ordóñez, D. H.}, doi = {10.1007/978-3-319-98998-3_10}, chapter = {Odor Pleasantness Classification from Electroencephalographic Signals and Emotional States}, title = {Communications in Computer and Information Science} }
@inbook{ type = {inbook}, year = {2018}, keywords = {Data visualization,Dimensionality reduction,Kernel PCA,Pairwise similarity}, pages = {149-157}, websites = {http://link.springer.com/10.1007/978-3-030-01132-1_17}, id = {be3aa382-2e00-34fc-aee6-14e7868cc8cd}, created = {2020-12-29T22:52:17.052Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:32.887Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Basante-Villota2018a}, private_publication = {false}, abstract = {In recent times, an undeniable fact is that the amount of data available has increased dramatically due mainly to the advance of new technologies allowing for storage and communication of enormous volumes of information. In consequence, there is an important need for finding the relevant information within the raw data through the application of novel data visualization techniques that permit the correct manipulation of data. This issue has motivated the development of graphic forms for visually representing and analyzing high-dimensional data. Particularly, in this work, we propose a graphical approach, which, allows the combination of dimensionality reduction (DR) methods using an angle-based model, making the data visualization more intelligible. Such approach is designed for a readily use, so that the input parameters are interactively given by the user within a user-friendly environment. The proposed approach enables users (even those being non-experts) to intuitively select a particular DR method or perform a mixture of methods. The experimental results prove that the interactive manipulation enabled by the here-proposed model-due to its ability of displaying a variety of embedded spaces-makes the task of selecting a embedded space simpler and more adequately fitted for a specific need.}, bibtype = {inbook}, author = {Basante-Villota, Cielo K. and Ortega-Castillo, Carlos M. 
and Peña-Unigarro, Diego F. and Revelo-Fuelagán, E. Javier and Salazar-Castro, Jose A. and Ortega-Bustamante, MacArthur and Rosero-Montalvo, Paul and Vega-Escobar, Laura Stella and Peluffo-Ordóñez, Diego H.}, doi = {10.1007/978-3-030-01132-1_17}, chapter = {Angle-Based Model for Interactive Dimensionality Reduction and Data Visualization}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inproceedings{ title = {Two novel clustering performance measures based on coherence and relative assignments of clusters}, type = {inproceedings}, year = {2017}, keywords = {Cluster coherence,Clustering,Graph-partitioning,Probabilities,Relative frequencies}, volume = {735}, websites = {https://link.springer.com/chapter/10.1007%2F978-3-319-66562-7_56}, id = {1b1346fc-ed11-384e-bfb0-4952104a4ee8}, created = {2020-12-09T05:26:52.526Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-10-07T20:48:27.329Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {This work proposes two novel alternatives for dealing with the highly important issue of the clustering performance estimation. One of the measures is the cluster coherence aimed to quantifying the normalized ratio of cuts within a graph-partitioning framework, and therefore it uses a graph-driven approach to explore the nature of data regarding the cluster assignment. The another one is the probability-based-performance quantifier, which calculates a probability value for each cluster through relative frequencies. Proposed measures are tested on some clustering representative techniques applied to real and artificial data sets. Experimental results probe the readability and robustness to noisy labels of our measures.}, bibtype = {inproceedings}, author = {Areiza-Laverde, H.J. and Castro-Ospina, A.E. and Rosero-Montalvo, P. and Peluffo-Ordóñez, D.H. and Rodríguez-Sotelo, J.L. and Becerra-Botero, M.A.}, doi = {10.1007/978-3-319-66562-7_56}, booktitle = {Communications in Computer and Information Science} }
@inbook{ type = {inbook}, year = {2017}, keywords = {Brain-computer interface,Environments,Mixture of classifiers,Signal processing}, pages = {511-523}, websites = {http://link.springer.com/10.1007/978-3-319-66562-7_37}, id = {c9b55fdc-ddf1-3257-af47-8e3ea998018d}, created = {2020-12-29T22:52:03.954Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:35.188Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Ortega-Adarme2017}, private_publication = {false}, abstract = {Brain-computer interface (BCI) is a system that provides communication between human beings and machines through an analysis of human brain neural activity. Several studies on BCI systems have been carried out in controlled environments, however, a functional BCI should be able to achieve an adequate performance in real environments. This paper presents a comparative study on alternative classification options to analyze motor imaginary BCI within multi-environment real scenarios based on mixtures of classifiers. The proposed methodology is as follows: The imaginary movement detection is carried out by means of feature extraction and classification, in the first stage; feature set is obtained from wavelet transform, empirical mode decomposition, entropy, variance and rates between minimum and maximum, in the second stage, where several classifier combinations are applied. The system is validated using a database, which was constructed using the Emotiv Epoc+ with 14 channels of electroencephalography (EEG) signals. These were acquired from three subject in 3 different environments with the presence and absence of disturbances. 
According to the different effects of the disturbances analyzed in the three environments, the performance of the mixture of classifiers presented better results when compared to the individual classifiers, making it possible to provide guidelines for choosing the appropriate classification algorithm to incorporate into a BCI system.}, bibtype = {inbook}, author = {Ortega-Adarme, M. and Moreno-Revelo, M. and Peluffo-Ordoñez, D. H. and Marín Castrillon, D. and Castro-Ospina, A. E. and Becerra, M. A.}, doi = {10.1007/978-3-319-66562-7_37}, chapter = {Analysis of Motor Imaginary BCI Within Multi-environment Scenarios Using a Mixture of Classifiers}, title = {Communications in Computer and Information Science} }
@article{ title = {Kernel-based framework for spectral dimensionality reduction and clustering formulation: A theoretical study}, type = {article}, year = {2017}, keywords = {Kernel PCA,Spectral clustering,Support vector machine}, pages = {31}, volume = {6}, websites = {http://revistas.usal.es/index.php/2255-2863/article/view/ADCAIJ2017613140}, month = {1}, day = {12}, id = {162cbf63-5ec6-39dc-bc8f-6ebfef2e6a69}, created = {2020-12-29T22:52:04.090Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:35.328Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {BLANCOVALENCIA2017}, private_publication = {false}, abstract = {This work outlines a unified formulation to represent spectral approaches for both dimensionality reduction and clustering. Proposed formulation starts with a generic latent variable model in terms of the projected input data matrix. Particularly, such a projection maps data onto an unknown high-dimensional space. Regarding this model, a generalized optimization problem is stated using quadratic formulations and a least-squares support vector machine. The solution of the optimization is addressed through a primal-dual scheme. Once latent variables and parameters are determined, the resultant model outputs a versatile projected matrix able to represent data in a low-dimensional space, as well as to provide information about clusters. 
Particularly, proposed formulation yields solutions for kernel spectral clustering and weighted-kernel principal component analysis.}, bibtype = {article}, author = {Blanco Valencia, Xiomara Patricia and Becerra, M. A. and Castro Ospina, A. E. and Ortega Adarme, M. and Viveros Melo, D. and Peluffo Ordóñez, D. H.}, doi = {10.14201/ADCAIJ2017613140}, journal = {ADCAIJ: Advances in Distributed Computing and Artificial Intelligence Journal}, number = {1} }
@inbook{ type = {inbook}, year = {2017}, pages = {444-455}, websites = {http://link.springer.com/10.1007/978-3-319-59740-9_44}, id = {b1f2a82e-92fe-3f3c-992f-ccf2ca809a7d}, created = {2020-12-29T22:52:04.214Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:36.528Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Rodriguez-Sotelo2017}, private_publication = {false}, abstract = {Sleep stage classification is a highly addressed issue in polysomnography; It is considered a tedious and time-consuming task if done manually by the specialist; therefore, from the engineering point of view, several methods have been proposed to perform an automatic sleep stage classification. In this paper an unsupervised approach to automatic sleep stage clustering of EEG signals is proposed which uses spectral features related to signal power, coherences, asymmetries, and Wavelet coefficients; the set of features is classified using a clustering algorithm that optimizes a cost function of minimum sum of squares. Accuracy and kappa coefficients are comparable to those of the current literature as well as individual stage classification results. Methods and results are discussed in the light of the current literature, as well as the utility of the groups of features to differentiate the states of sleep. Finally, clustering techniques are recommended for implementation in support systems for sleep stage scoring.}, bibtype = {inbook}, author = {Rodríguez-Sotelo, J. L. and Osorio-Forero, A. and Jiménez-Rodríguez, A. and Restrepo-de-Mejía, F. and Peluffo-Ordoñez, D. H. and Serrano, J.}, doi = {10.1007/978-3-319-59740-9_44}, chapter = {Sleep Stages Clustering Using Time and Spectral Features of EEG Signals}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inbook{ type = {inbook}, year = {2017}, websites = {https://www.diegopeluffo.com/publicaciones/2017_UsabilidadDrones_CISCIC.pdf}, id = {e4f05077-fdd8-3110-a3ec-7c1efa40db94}, created = {2020-12-29T22:52:04.363Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:36.517Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {SandraKarinaNarvaezPupialesAnaCristinaUmaquingaCriollo2017}, private_publication = {false}, abstract = {The Ecuadorian economy has been based mainly on the exploitation of raw materials and imports of goods and services, this ecosystem not encouraging development as a result in certain periods of time and according to the variability of the international market has caused to this day swings, as a result of changes in the prices of these resources compared to prices of products with higher added value and high technology. One of the main objectives of the Change of the Productive Matrix is to evolve this pattern of primary exporting specialization to a pattern of diversified production highlighting especially the capacities and knowledge of human talent involved in these processes.In order to frame the economic and social development of the population around this change, the incorporation of technology has become one of the main axes to improve andachieve the proposed objectives, being in this way a vital tool to improve production. 
Of the many technological possibilities that exist, this research highlights, analyzes and evaluates the incorporation of unmanned Aerial Systems or Vehicles commonly known as DRONES to the agricultural sector with special emphasis on the advantages, disadvantages, risks, acceptance levels, components of hardware and software, statistics related to production cost savings and how the current Regulation by the Civil Aviation Directorate in Ecuador contributes to the development and usability of these systems.Finally, the conclusions point to the great possibilities of growth and transformation of agriculture to effective methods of precision where the aid of DRONES and automation can provide a large amount of information such as reports, images, videos, maps andothers where human intervention becomes minimal Resumen La economía ecuatoriana se ha basado fundamentalmente en la explotación de materias primas e importación de bienes y servicios, este ecosistema poco alentador de desarrollo como resultado en ciertos periodos de tiempo y de acuerdo a la variabilidad del mercado internacional ha provocado hasta el día de hoy vaivenes, a consecuencia de las variaciones de los precios de estos recursos frente a precios de productos de mayor valor agregado y alta tecnología. Uno de los objetivos principales del Cambio de la Matriz Productiva es evolucionar este patrón de especialización primario exportador a un patrón de producción diversificado resaltando especialmente las capacidades y conocimientos del talento humano interviniente en estos procesos.Para enmarcar el desarrollo económico y social de la población alrededor de este cambio, la incorporación de la tecnología se ha convertido en uno de los principales ejes para mejorar y conseguir los objetivos propuestos, siendo de esta forma una herramienta vital para mejorar la producción. 
De las muchas posibilidades tecnológicas existentes, en esta investigación se resalta, analiza y evalúa la incorporación de los Sistemas o Vehículos Aéreos no tripulados conocidos comúnmente como DRONES al sector agrícola con especial énfasis en las ventajas, desventajas, riesgos, niveles de aceptación, componentes de hardware y software, estadísticas relacionadas a los ahorros de costos de producción y cómo la Regulación vigente por parte de la Dirección General de Aviación Civil en el Ecuador contribuye al desarrollo y usabilidad de estos sistemas.Finalmente las conclusiones exponen las grandes posibilidades de crecimiento y transformación de la agricultura a métodos efectivos de precisión en dónde la ayuda de los vehículos aéreos no tripulados (DRONES) y automatización pueden facilitar gran cantidad de información tales como informes, imágenes, videos, mapas y otros en donde la intervención humana llega a ser mínima}, bibtype = {inbook}, author = {Narváez Pupiales, Sandra Karina and Umaquinga Criollo, Ana Cristina and Peluffo Ordoñez, Diego Hernán}, chapter = {IMPACTO DE LA USABILIDAD DE LOS DRONES EN LA AGREGACIÓN DEL VALOR EN LA PRODUCCIÓN. IMPACT OF USABILITY DRONES IN THE AGGREGATION OF VALUE IN PRODUCTION}, title = {III JORNADAS ACADÉMICAS INTERNACIONALES CISIC 2017} }
@inbook{ type = {inbook}, year = {2017}, pages = {456-463}, websites = {http://link.springer.com/10.1007/978-3-319-59740-9_45}, id = {4e78cbfc-00e2-3c79-9695-56eea1e16d53}, created = {2020-12-29T22:52:04.782Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:36.507Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Rodriguez-Sotelo2017a}, private_publication = {false}, abstract = {In this work, an efficient non-supervised algorithm for clustering of ECG signals is presented. The method is assessed over a set of records from MIT/BIH arrhythmia database with different types of heartbeats, including normal (N) heartbeats, as well as the arrhythmia heartbeats recommended by the AAMI, usually found in Holter recordings: ventricular extra systoles (VE), left and right branch bundles blocks (LBBB and RBBB) and atrial premature beats (APB). The results are assessed by means the sensitivity and specificity measures, taking advantage of the database labels. Also, unsupervised performance measures are used. Finally, the performance of the algorithm is in average 95%, improving results reported by previous works of the literature.}, bibtype = {inbook}, author = {Rodríguez-Sotelo, J. L. and Peluffo-Ordoñez, D. H. and López-Londoño, D. and Castro-Ospina, A.}, doi = {10.1007/978-3-319-59740-9_45}, chapter = {Segment Clustering for Holter Recordings Analysis}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inproceedings{ title = {Face Detection and Classification Using Eigenfaces and Principal Component Analysis: Preliminary Results}, type = {inproceedings}, year = {2017}, keywords = {Eigenfaces,PCA,face recognition}, pages = {309-315}, websites = {http://ieeexplore.ieee.org/document/8328124/}, month = {11}, publisher = {IEEE}, id = {1ff56eb2-bf9b-3f48-aed0-abf7d4226ba5}, created = {2020-12-29T22:52:11.917Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:33.891Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Mejia-Campos2017}, private_publication = {false}, abstract = {This work is a Scientific Track paper corresponding to the area of Intelligent Systems. This paper presents a facial recognition approach based on the Eigenfaces method as well as Principal Component Analysis (PCA) as algorithm of processing and cleaning images, respectively. The classification was performed by using the Euclidean distance between the facial characters stored in a database and new images captured in an interface with similarly coded developed in MatLab. As main results, we obtained: (i) 68.9% of classification accuracy when using different components of stored faces, (ii) 91.43% of classification performance when storing 3 components for each face and evaluating more users for training model in seven controlled experiments.}, bibtype = {inproceedings}, author = {Mejia-Campos, Richard and Nejer-Haro, Diego and Recalde-Avincho, Santiago and Rosero-Montalvo, Paul and Peluffo-Ordonez, Diego}, doi = {10.1109/INCISCOS.2017.59}, booktitle = {2017 International Conference on Information Systems and Computer Science (INCISCOS)} }
@article{ title = {Empleo del estropajo común (Luffa cylindrica) en la remoción de contaminantes.}, type = {article}, year = {2017}, pages = {205-215}, volume = {8}, websites = {http://hemeroteca.unad.edu.co/index.php/riaa/article/view/1850}, month = {6}, day = {5}, id = {a8eec0b6-8748-384c-a61e-f84bd6eea1ed}, created = {2020-12-29T22:52:12.195Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:33.893Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Pereira-Martinez2017}, private_publication = {false}, abstract = {Interesa a la ingeniería, el desarrollo biotecnológico de técnicas de tratamiento de agua para la remoción de contaminantes, aprovechando las propiedades de las fibras de Luffa cylindrica (FLc). Así lo exponen los argumentos explicados en varias investigaciones realizadas en todo el mundo en las que se describe un sugestivo escenario de razones válidas para considerar a la fibra del estropajo, como un material industrialmente promisorio y sostenible, apto para la realización de tratamientos de remoción de contaminantes y en la separación de sustancias inmersas en matrices fluidas. También explican la utilización de las fibras como matriz inmovilizadora para sostener comunidades microbianas activas implantadas con fines específicos; incluso, al comprender la arquitectura y las propiedades mecánicas de las FLc, se explora su utilización como agregado en la obtención de materiales compuestos, en la producción de nuevas sustancias y su capacidad para retener humedad. El presente artículo se refiere a la descripción de los procesos de adsorción e inmovilización en los que se ha involucrado a las FLc haciendo una revisión de experiencias investigativas. 
A la vez se estudian, los razonamientos que han permitido describir las técnicas y que posibilitan el aporte de soluciones al problema de la remoción de contaminantes y del tratamiento de agua.}, bibtype = {article}, author = {Pereira-Martínez, Ricardo Ignacio and Muñoz-Paredes, Juan Fernando and Peluffo-Ordoñez, Diego Hernán}, doi = {10.22490/21456453.1850}, journal = {Revista de Investigación Agraria y Ambiental}, number = {1} }
@inbook{ type = {inbook}, year = {2017}, websites = {https://www.diegopeluffo.com/publicaciones/2017_BigDataAnalyticsEmpresa_CISIC.pdf}, id = {85988dfb-42f8-39e0-ba33-373f89bebe72}, created = {2020-12-29T22:52:12.204Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:34.036Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {AnaCristinaUmaquinga-CriolloSandraKarinaNarvaez-Pupiales2017}, private_publication = {false}, abstract = {Ante el crecimiento exponencial y vertiginoso del volumen de los datos de diferente tipo: estructurados, semiestructurados y no estructurados provenientes de una variedad de fuentes entre ellas: la web, redes sociales, bases de datos, archivos de audio/video, datos transaccionales, sensores, comunicación máquina a máquina (denominado M2M). El área de Big Data pretende dar respuesta a los desafíos del tratamiento de la información.Es por ello, que el proceso de análisis de grandes volúmenes de datos Big Data Analytics (denominado BDA) facilita el descubrimiento de patrones, predicciones, fraudes, tendencias de mercado, comportamientos y preferencias de los clientes e información de utilidad, que no sería posible con las herramientas convencionales. BDA se convierte en una de las herramientas de soporte para la toma de decisiones empresariales y ventaja competitiva en tiempo real o en el menor tiempo posible frente a sus competidores, ofreciendo nuevos niveles de competitividad, procesos, modelos de negocio basados en datos y reducción del riesgo para conservar, fidelizar y captar una mayor cantidad de clientes generando un aumento en las fuentes de ingreso de las empresas.El presente artículo es de tipo exploratorio, descriptivo y documental. 
Se realiza un estudio descriptivo del impacto de Big Data Analytics (BDA) en el campo empresarial, así como un breve recorrido por sus tendencias, oportunidades, dificultades y retos.Este estudio pretende contribuir a la comunidad de investigadores, así como al personal de las empresas y a quienes se inician en el conocimiento de Big Data Analytics para una mejor comprensión en este campo Abstract By the exponential and vertiginous growth of the volume of data of different types: structured, semi-structured and unstructured from a variety of sources including: the web, social networks, databases, audio / video files, transactional data, sensors, machine-to-machine communication (denominated M2M). The Big-Data-area is intended to address the challenges of information processing.Therefore, the Big Data Analytics (BDA) process of large volumes of data facilitates the discovery of patterns, predictions, fraud, market trends, customer behaviours and preferences and useful information that would not be possible with conventional tools. BDA becomes one of the tools to support business decision-making and competitive advantage in real time or in the shortest possible time in relation its competitors, offering new levels of competitiveness, processes, business models based in data and risk reduction, to conserve, retain and attract a greater number of customers generating an increase in the sources of income of companies.This article is exploratory, descriptive and documentary. 
A descriptive study of the impact of Big Data Analytics (BDA) in the business field is presented, as well as a brief tour of its tendencies, opportunities, difficulties and challenges.This study aims to contribute to the research community, as well as the staff of the companies and those who are introduced to the knowledge of Big Data Analytics for a better understanding in this field}, bibtype = {inbook}, author = {Umaquinga-Criollo, Ana Cristina and Narvaez-Pupiales, Sandra Karina and Peluffo-Ordoñez, Diego Hernán and Ortega-Bustamante, MacArthur Cosme}, chapter = {BIG DATA ANALYTICS (BDA) EN LA TOMA DE DECISIONES EMPRESARIALES. BIG DATA ANALYTICS (BDA) IN BUSINESS DECISION MAKING}, title = {III JORNADAS ACADÉMICAS INTERNACIONALES CISIC 2017} }
@inproceedings{ title = {Elderly fall detection using data classification on a portable embedded system}, type = {inproceedings}, year = {2017}, keywords = {Embedded system,Fall detection,Knn,Prototype selection}, pages = {1-4}, websites = {http://ieeexplore.ieee.org/document/8247529/}, month = {10}, publisher = {IEEE}, id = {248703ed-fb23-37da-bc86-9841e24cb616}, created = {2020-12-29T22:52:12.207Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:34.061Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Rosero-Montalvo2017a}, private_publication = {false}, abstract = {The area of research on the detection of falls in the elderly allows to prevent major ailments to a person and not receiving timely medical attention. Although different systems have been proposed for the detection of falls, there are some open problems such as: cost, computational load, precision, portability, among others. This paper presents an alternative approach based on the acquisition of speed variation of the person on the X, Y and Z axes using an accelerometer and machine learning techniques. Since the information acquired by the sensor is very variant, with noise and high volume of data, a prototype selection stage is carried out using confidence intervals and techniques of Leaving-One-Out. Subsequently, automatic detection is performed using the K-nearest neighbors (K-NN) classifier. As a result of fall detection 95% accuracy is achieved in experiments from 5 trials and already used in reality by an older adult, the system has a time of 30 ms for position selection and the detection of drop is maintained in a 92% right.}, bibtype = {inproceedings}, author = {Rosero-Montalvo, P.D. and Peluffo-Ordonez, D.H. and Godoy, Pamela and Ponce, K. and Rosero, E.A. and Vasquez, C.A. and Cuzme, F. and Flores, S.C and Mera, Z. 
A.}, doi = {10.1109/ETCM.2017.8247529}, booktitle = {2017 IEEE Second Ecuador Technical Chapters Meeting (ETCM)} }
@inbook{ type = {inbook}, year = {2017}, keywords = {Data visualization,Dimensionality reduction,Pairwise similarity}, pages = {334-342}, websites = {http://link.springer.com/10.1007/978-3-319-52277-7_41}, id = {041dab26-05da-3551-adbb-dc9e35314022}, created = {2020-12-29T22:52:12.544Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:33.417Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Rosero-Montalvo2017}, private_publication = {false}, abstract = {This work presents a new interactive data visualization approach based on mixture of the outcomes of dimensionality reduction (DR) methods. Such a mixture is a weighted sum, whose weighting factors are defined by the user through a visual and intuitive interface. Additionally, the low-dimensional representation space produced by DR methods are graphically depicted using scatter plots powered via an interactive data-driven visualization. To do so, pairwise similarities are calculated and employed to define the graph to be drawn on the scatter plot. Our visualization approach enables the user to interactively combine DR methods while provided information about the structure of original data, making then the selection of a DR scheme more intuitive.}, bibtype = {inbook}, author = {Rosero-Montalvo, P. and Diaz, P. and Salazar-Castro, J. A. and Peña-Unigarro, D. F. and Anaya-Isaza, A. J. and Alvarado-Pérez, J. C. and Therón, R. and Peluffo-Ordóñez, D. H.}, doi = {10.1007/978-3-319-52277-7_41}, chapter = {Interactive Data Visualization Using Dimensionality Reduction and Similarity-Based Representations}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@article{Moreno-Revelo2017a,
  title = {Análisis no supervisado aplicado a la detección de arritmias cardiacas},
  type = {article},
  year = {2017},
  pages = {257--272},
  volume = {8},
  websites = {https://ingenieria.ute.edu.ec/enfoqueute/index.php/revista/article/view/125},
  month = feb,
  day = {24},
  id = {84c6a89f-9fb6-3759-bea2-ac2e1e12423d},
  created = {2020-12-29T22:52:13.125Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:33.748Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Moreno-Revelo2017a},
  private_publication = {false},
  abstract = {Una arritmia es una patología que consiste en la alteración de los latidos del corazón. A pesar de que el electrocardiograma de 12 derivaciones permite evaluar el comportamiento eléctrico del corazón para determinar ciertas patologías, existen algunas arritmias que son de difícil detección con este tipo de electrocardiografía. Por tanto, es necesario recurrir al uso del monitor Holter, debido a que facilita el registro de la actividad eléctrica del corazón durante largos periodos de tiempo, por lo general de 24 a 48 horas. Debido a la extensión de los registros proporcionados por el monitor, es común acudir al uso de sistemas computacionales para evaluar características diagnósticas y morfológicas de los latidos con el fin de determinar si existe algún tipo de anormalidad. Estos sistemas computacionales pueden basarse en técnicas supervisadas o no supervisadas de reconocimiento de patrones, pero teniendo en cuenta que en la primera opción el realizar una inspección visual de la gran cantidad de latidos presentes en un registro Holter, resulta ser una ardua tarea, además de implicar costos monetarios, en este trabajo se presenta el diseño de un sistema completo para la identificación de arritmias en registros Holter usando técnicas no supervisadas de reconocimiento de patrones. El sistema propuesto involucra etapas de pre-procesamiento de la señal, segmentación y caracterización de latidos, además de selección de características y agrupamiento. En este caso, la técnica utilizada es k-medias. Dichas etapas se aplican dentro del marco de una metodología basada en segmentos que mejora la detección de clases minoritarias. Asimismo, se considera criterios de inicialización que permiten mejorar las medidas de desempeño, en especial, la sensibilidad. Como resultado, se determina que usar k-medias con el criterio de inicialización máx-mín y un número de grupos igual a 12, permite obtener los mejores resultados, siendo: 99,36 %, 91,31 % y 99,16 % para exactitud, sensibilidad y especificidad, respectivamente.},
  bibtype = {article},
  author = {Moreno-Revelo, Mónica and Patascoy-Botina, Sandra and Pantoja-Buchelli, Andrés and Revelo Fuelagán, Javier and Rodríguez-Sotelo, José and Murillo-Rendón, Santiago and Peluffo-Ordoñez, Diego},
  doi = {10.29019/enfoqueute.v8n1.125},
  journal = {Enfoque UTE},
  number = {1}
}
@article{ title = {Theoretical developments for interpreting kernel spectral clustering from alternative viewpoints}, type = {article}, year = {2017}, keywords = {Kernel,Spectral clustering,Support vector machines}, pages = {1670-1676}, volume = {2}, websites = {http://astesj.com/v02/i03/p208/}, month = {8}, id = {7dd8e161-e516-3758-b04a-1f9f9272312b}, created = {2020-12-29T22:52:13.545Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:34.741Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Peluffo-Ordonez2017}, private_publication = {false}, abstract = {To perform an exploration process over complex structured data within unsupervised settings, the so-called kernel spectral clustering (KSC) is one of the most recommended and appealing approaches, given its versatility and elegant formulation. In this work, we explore the relationship between (KSC) and other well-known approaches, namely normalized cut clustering and kernel k-means. To do so, we first deduce a generic KSC model from a primal-dual formulation based on least-squares support-vector machines (LS-SVM). For experiments, KSC as well as other consider methods are assessed on image segmentation tasks to prove their usability.}, bibtype = {article}, author = {Peluffo-Ordóñez, Diego and Rosero-Montalvo, Paul and Umaquinga-Criollo, Ana and Suárez-Zambrano, Luis and Domínguez-Limaico, Hernan and Oña-Rocha, Omar and Flores-Armas, Stefany and Maya-Olalla, Edgar}, doi = {10.25046/aj0203208}, journal = {Advances in Science, Technology and Engineering Systems Journal}, number = {3} }
@inproceedings{Ponce-Guevara2017,
  title = {{GreenFarm-DM}: A tool for analyzing vegetable crops data from a greenhouse using data mining techniques (First trial)},
  type = {inproceedings},
  year = {2017},
  keywords = {Big Data,KDD,Precision agriculture,data analytics,data mining},
  pages = {1--6},
  websites = {http://ieeexplore.ieee.org/document/8247519/},
  month = oct,
  publisher = {IEEE},
  id = {af367fbf-5042-30a5-a24d-460ab4a987a3},
  created = {2020-12-29T22:52:13.646Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:34.906Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Ponce-Guevara2017},
  private_publication = {false},
  abstract = {This work shows the use of Big Data and Data Mining techniques on vegetable crops data from a greenhouse by implementing the first version of a software tool, so called GreenFarm-DM. Such a tool is aimed at analyzing the factors that influence the growth of the crops, and determine a predictive model of soil moisture. Within a greenhouse, the variables that affect crop growth are: relative humidity, soil moisture, ambient temperature, and levels of illumination and CO2. These parameters are essential for photosynthesis, i.e. during processes where plants acquire the most nutrients, and therefore, if performing a good control on these parameters, plants might grow healthier and produce better fruits. The process of analysis of such factors in a data mining context requires designing an analysis system and establishing an objective variable to be predicted by the system. In this case, in order to optimize water resource expenditure, soil moisture has been chosen as the target variable. The proposed analysis system is developed in a user interface implemented in Java and NetBeans IDE 8.2, and consists mainly of two stages. One of them is the classification through algorithm C4.5 (chosen for the first trial), which uses a decision tree based on the data entropy, and allows to visualize the results graphically. The second main stage is the prediction, in which, from the classification results obtained in the previous stage, the target variable is predicted from information of a new set of data. In other words, the interface builds a predictive model to determine the behavior of soil moisture.},
  bibtype = {inproceedings},
  author = {Ponce-Guevara, K. L. and Palacios-Echeverria, J. A. and Maya-Olalla, E. and Dominguez-Limaico, H. M. and Suarez-Zambrano, L. E. and Rosero-Montalvo, P. D. and Peluffo-Ordonez, D. H. and Alvarado-Perez, J. C.},
  doi = {10.1109/ETCM.2017.8247519},
  booktitle = {2017 IEEE Second Ecuador Technical Chapters Meeting (ETCM)}
}
@inbook{ type = {inbook}, year = {2017}, keywords = {Data visualization,Dimensionality reduction,Pairwise dissimilarity}, pages = {461-469}, websites = {http://link.springer.com/10.1007/978-3-319-68935-7_50}, id = {82bb5e94-d3af-3525-8eb9-99c1a0ef7c58}, created = {2020-12-29T22:52:13.839Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:34.826Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Pena-Unigarro2017}, private_publication = {false}, abstract = {This work describes a new model for interactive data visualization followed from a dimensionality-reduction (DR)-based approach. Particularly, the mixture of the resulting spaces of DR methods is considered, which is carried out by a weighted sum. For the sake of user interaction, corresponding weighting factors are given via an intuitive color-based interface. Also, to depict the DR outcomes while showing information about the input high-dimensional data space, the low-dimensional representations reached by the mixture is conveyed using scatter plots enhanced with an interactive data-driven visualization. In this connection, a constrained dissimilarity approach define the graph to be drawn on the scatter plot.}, bibtype = {inbook}, author = {Peña-Unigarro, D. F. and Rosero-Montalvo, P. and Revelo-Fuelagán, E. J. and Castro-Silva, J. A. and Alvarado-Pérez, J. C. and Therón, R. and Ortega-Bustamante, C. M. and Peluffo-Ordóñez, D. H.}, doi = {10.1007/978-3-319-68935-7_50}, chapter = {Interactive Data Visualization Using Dimensionality Reduction and Dissimilarity-Based Representations}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@article{Viveros-Melo2017,
  title = {Razonamiento basado en casos aplicado al diagnóstico médico utilizando clasificadores multi-clase: Un estudio preliminar},
  type = {article},
  year = {2017},
  pages = {232--243},
  volume = {8},
  websites = {https://ingenieria.ute.edu.ec/enfoqueute/index.php/revista/article/view/141},
  month = feb,
  day = {24},
  id = {814d869e-f27f-3426-a022-1512f0246d37},
  created = {2020-12-29T22:52:14.762Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:34.434Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Viveros-Melo2017},
  private_publication = {false},
  abstract = {CBR ha demostrado ser apropiado para trabajar con datos de dominios poco estructurados o situaciones donde es difícil la adquisición de conocimiento, como es el caso del diagnóstico médico, donde es posible identificar enfermedades como: cáncer, predicción de epilepsia y diagnóstico de apendicitis. Algunas de las tendencias que se pueden desarrollar para CBR en la ciencia de la salud están orientadas a reducir el número de características en datos de gran dimensión. Una contribución importante puede ser la estimación de probabilidades de pertenencia a cada clase para los nuevos casos. Con el fin de representar adecuadamente la base de datos y evitar los inconvenientes causados por la alta dimensión, ruido y redundancia de los mimos, en este trabajo, se utiliza varios algoritmos en la etapa de pre-procesamiento para realizar una selección de variables y reducción de dimensiones. Además, se realiza una comparación del rendimiento de algunos clasificadores multi-clase representativos para identificar el más eficaz e incluirlo en un esquema CBR. En particular, se emplean cuatro técnicas de clasificación y dos técnicas de reducción para hacer un estudio comparativo de clasificadores multi-clase sobre CBR},
  bibtype = {article},
  author = {Viveros-Melo, D. and Ortega-Adarme, M. and Blanco Valencia, X. and Castro-Ospina, A. E. and Murillo Rendón, S. and Peluffo-Ordóñez, D. H.},
  doi = {10.29019/enfoqueute.v8n1.141},
  journal = {Enfoque UTE},
  number = {1}
}
@inbook{ type = {inbook}, year = {2017}, keywords = {Multi-labeler classification,Supervised kernel,Support vector machines}, pages = {343-351}, websites = {http://link.springer.com/10.1007/978-3-319-52277-7_42}, id = {a0480f2a-1158-3dcd-b6eb-9b495ccf9633}, created = {2020-12-29T22:52:14.822Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:34.588Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Imbajoa-Ruiz2017}, private_publication = {false}, abstract = {This work introduces a multi-labeler kernel novel approach for data classification learning from multiple labelers. The learning process is done by training support-vector machine classifiers using the set of labelers (one labeler per classifier). The objective functions representing the boundary decision of each classifier are mixed by means of a linear combination. Followed from a variable relevance, the weighting factors are calculated regarding kernel matrices representing each labeler. To do so, a so-called supervised kernel function is also introduced, which is used to construct kernel matrices. Our multi-labeler method reaches very good results being a suitable alternative to conventional approaches.}, bibtype = {inbook}, author = {Imbajoa-Ruiz, D. E. and Gustin, I. D. and Bolaños-Ledezma, M. and Arciniegas-Mejía, A. F. and Guasmayan-Guasmayan, F. A. and Bravo-Montenegro, M. J. and Castro-Ospina, A. E. and Peluffo-Ordóñez, D. H.}, doi = {10.1007/978-3-319-52277-7_42}, chapter = {Multi-labeler Classification Using Kernel Representations and Mixture of Classifiers}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@article{Alvear-Puertas2017,
  title = {Internet de las Cosas y Visión Artificial, Funcionamiento y Aplicaciones: Revisión de Literatura},
  type = {article},
  year = {2017},
  pages = {244--256},
  volume = {8},
  websites = {https://ingenieria.ute.edu.ec/enfoqueute/index.php/revista/article/view/121},
  month = feb,
  day = {24},
  id = {380fe3e5-e4a0-3f46-bcbe-9809acb89767},
  created = {2020-12-29T22:52:16.034Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:33.086Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Alvear-Puertas2017},
  private_publication = {false},
  abstract = {Internet de las cosas (o también conocido como IoT) es una de las tecnologías más nombradas en la actualidad debido a la capacidad que prevé para conectar todo tipo de dispositivos al Internet, si a las potencialidades de IoT le adicionamos otra tecnología de alto impacto como lo es la Visión Artificial tenemos un amplio campo de aplicaciones innovadoras, donde el procesamiento de imágenes y video en tiempo real permiten la visualización de grandes cantidades de datos en internet. Las principales aplicaciones que se desarrollan con IoT y Visión Artificial pueden ser implementadas en educación, medicina, edificios inteligentes, sistemas de vigilancia de personas y vehículos, entre otros. Este tipo de aplicaciones mejoran la calidad de vida de los usuarios, sin embargo, para su desarrollo se requiere una infraestructura que permita la convergencia de diferentes protocolos y dispositivos, pero de manera especial que puedan manejar las diferentes fases de la adquisición de imágenes. En este trabajo se ha realizado una revisión de los inicios, conceptos, tecnologías y aplicaciones ligados a la Visión Artificial con el Internet de las Cosas para poder comprender de forma precisa el impacto de sus aplicaciones en la vida cotidiana.},
  bibtype = {article},
  author = {Alvear-Puertas, Vanessa and Rosero-Montalvo, Paul and Peluffo-Ordóñez, Diego and Pijal-Rojas, José},
  doi = {10.29019/enfoqueute.v8n1.121},
  journal = {Enfoque UTE},
  number = {1}
}
@inbook{Moreno-Revelo2017,
  type = {inbook},
  year = {2017},
  keywords = {Biometric,Classifiers mixture,Multimodal system,Physiological signals,Signal processing},
  pages = {436--443},
  websites = {http://link.springer.com/10.1007/978-3-319-68935-7_47},
  id = {60b5db4f-bbc4-3e95-b53e-aa70cf520663},
  created = {2020-12-29T22:52:16.154Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:33.099Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Moreno-Revelo2017},
  private_publication = {false},
  abstract = {The biometric is an open research field that requires analysis of new techniques to increase its accuracy. Although there are active biometric systems for subject identification, some of them are considered vulnerable to be fake such as a fingerprint, face or palm-print. Different biometric studies based on physiological signals have been carried out. However, these can be regarded as limited. So, it is important to consider that there is a need to perform an analysis among them and determine the effectivity of each one and proposed new multimodal biometric systems. In this work is presented a comparative study of 40 physiological signals from a multimodal analysis. First, a preprocessing and feature extraction was carried out using Hermite coefficients, discrete wavelet transform, and statistical measures of them. Then, feature selection was applied using two selectors based on Rough Set algorithms, and finally, classifiers and a mixture of five classifiers were used for classification. The more relevant results shown an accuracy of 97.7% from 3 distinct EEG signals, and an accuracy of 100% using 40 different physiological signals (32 EEG, and eight peripheral signals).},
  bibtype = {inbook},
  author = {Moreno-Revelo, M. and Ortega-Adarme, M. and Peluffo-Ordoñez, D. H. and Alvarez-Uribe, K. C. and Becerra, M. A.},
  doi = {10.1007/978-3-319-68935-7_47},
  chapter = {Comparison Among Physiological Signals for Biometric Identification},
  title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}
}
@inproceedings{Isaza2017,
  title = {Morfologías visuales de representación de datos como aumento de capacidades analíticas humanas: Una revisión de literatura},
  type = {inproceedings},
  year = {2017},
  websites = {https://www.researchgate.net/publication/322509217_Morfologias_visuales_de_representacion_de_datos_como_aumento_de_capacidades_analiticas_humanas_Una_revision_de_literatura},
  id = {c0bc692d-51ec-3439-bed1-053614770ea9},
  created = {2020-12-29T22:52:16.708Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:32.717Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Isaza2017},
  private_publication = {false},
  abstract = {La representación visual de datos es una técnica de extracción de conocimiento que permite tener una percepción de toda la información disponible dentro de Big Data. Para lograr el objetivo de captación de la atención humana, es necesario representar el conjunto de datos de una manera intuitiva. De esta forma, el usuario pueda tomar decisiones adecuadas. Consecuentemente, el mejoramiento de la experiencia humano computador se basa en el uso de técnicas de análisis de datos, donde los recursos computacionales deben ser optimizados. En este trabajo, se desarrolla una metodología con una investigación de tipo descriptivo, exploratorio y documental con los diferentes enfoques de visualización de datos orientados al análisis exploratorio para el descubrimiento científico y aumento de las capacidades humanas como apoyo a las decisiones automáticas. Abstract Visual representation is an approach to extract knowledge, which enables users to perceive the information whitin a context of Big Data. To involve the human perception into the data analysis, an inuitive data representation is needed. Consequently, any user will be able to make more adequate decisions. Indeed, the enhancement of human-computer interaction is based on the use of data anaylisis techniques while computational broad is optimized. In this work, a descriptive, exploratory and documental methodology is presented aimed at highlighting the benefit of involving the human skills within the process of automatic decision making.},
  bibtype = {inproceedings},
  author = {Anaya-Isaza, Andrés Javier and Umaquinga-Criollo, Ana Cristina and Narváez Olmedo, Gabriela and Rosero-Montalvo, Paúl David and Peluffo-Ordóñez, Diego Hernán},
  booktitle = {I Congreso Internacional de Ingenierías 2017}
}
@inbook{ type = {inbook}, year = {2017}, keywords = {Kernel spectral clustering,Motion segmentation,Time-varying data,Variable ranking}, pages = {406-414}, websites = {http://link.springer.com/10.1007/978-3-319-68935-7_44}, id = {3f061daf-b487-3b08-aee1-f05023fe458b}, created = {2020-12-29T22:52:16.927Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:32.737Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Ona-Rocha2017}, private_publication = {false}, abstract = {Dynamic or time-varying data analysis is of great interest in emerging and challenging research on automation and machine learning topics. In particular, motion segmentation is a key stage in the design of dynamic data analysis systems. Despite several studies have addressed this issue, there still does not exist a final solution highly compatible with subsequent clustering/classification tasks. In this work, we propose a motion segmentation compatible with kernel spectral clustering (KSC), here termed KSC-MS, which is based on multiple kernel learning and variable ranking approaches. Proposed KSC-MS is able to automatically segment movements within a dynamic framework while providing robustness to noisy environments.}, bibtype = {inbook}, author = {Oña-Rocha, O. R. and Sánchez-Manosalvas, O. T. and Umaquinga-Criollo, A. C. and Rosero-Montalvo, P. D. and Suárez-Zambrano, L. E. and Rodríguez-Sotelo, J. L. and Peluffo-Ordóñez, D. H.}, doi = {10.1007/978-3-319-68935-7_44}, chapter = {Automatic Motion Segmentation via a Cumulative Kernel Representation and Spectral Clustering}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inbook{Becerra2017,
  type = {inbook},
  year = {2017},
  keywords = {ANFIS,PM2.5 estimation,Support vector regression},
  pages = {426--433},
  websites = {http://link.springer.com/10.1007/978-3-319-52277-7_52},
  id = {ef978dd1-5e34-3586-8953-c079ad7b7d06},
  created = {2020-12-29T22:52:17.079Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:32.857Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Becerra2017},
  private_publication = {false},
  abstract = {Nowadays, an important decrease in the quality of the air has been observed, due to the presence of contamination levels that can change the natural composition of the air. This fact represents a problem not only for the environment, but also for the public health. Consequently, this paper presents a comparison among approaches based on Adaptive Neural Fuzzy Inference System (ANFIS) and Support Vector Regression (SVR) for the estimation level of PM2.5 (Particle Material 2.5) in specific geographic locations based on nearby stations. The systems were validated using an environmental database that belongs to air quality network of Valle de Aburrá (AMVA) of Medellin Colombia, which has the registration of 5 meteorological variables and 2 pollutants that are from 3 nearby measurement stations. Therefore, this project analyses the relevance of the characteristics obtained in every single station to estimate the levels of PM2.5 in the target station, using four different selectors based on Rough Set Feature Selection (RSFS) algorithms. Additionally, five systems to estimate the PM2.5 were compared: three based on ANFIS, and two based on SVR to obtain an aim and an efficient mechanism to estimate the levels of PM2.5 in specific geographic locations fusing data obtained from the near monitoring stations.},
  bibtype = {inbook},
  author = {Becerra, Miguel A. and Bedoya Sánchez, Marcela and García Carvajal, Jacobo and Guzmán Luna, Jaime A. and Peluffo-Ordóñez, Diego H. and Tobón, Catalina},
  doi = {10.1007/978-3-319-52277-7_52},
  chapter = {Data Fusion from Multiple Stations for Estimation of PM2.5 in Specific Geographical Location},
  title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}
}
@inproceedings{Rosero-Montalvo2017b,
  title = {Prototype reduction algorithms comparison in nearest neighbor classification for sensor data: Empirical study},
  type = {inproceedings},
  year = {2017},
  keywords = {Knn,Prototype selection,Sensor data},
  pages = {1--5},
  websites = {http://ieeexplore.ieee.org/document/8247530/},
  month = oct,
  publisher = {IEEE},
  id = {8cbf93de-9541-323d-9cda-147c433908dd},
  created = {2021-03-29T21:51:29.514Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-05-10T23:20:54.871Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Rosero-Montalvo2017b},
  private_publication = {false},
  abstract = {This work presents a comparative study of prototype selection (PS) algorithms. Such a study is done over data-from-sensor acquired by an embedded system. Particularly, five flexometers are used as sensors, which are located inside a glove aimed to read sign language. Measures were taken to quantify the balance between classification performance and reduction training set data (QCR) with k neighbors equal to 3 and 1 to force the classifier (kNN) to the maximum. Two tests were used: (a)the QCR performance and (b) the embedded system decision in real proves. As result the Random Mutation Hill Climbing (RMHC) algorithm is considered the best option to choose in this data type with removed instances at 87% and classification performance at 82% in software tests, also the classifier kNN must be with k=3 to improve the classification performance. In a real situation, with the algorithm implemented. The system makes correct decisions at 81% with 5 persons doing sign language in real time.},
  bibtype = {inproceedings},
  author = {Rosero-Montalvo, Paul and Peluffo-Ordonez, Diego H. and Umaquinga, Ana and Anaya, Andres and Serrano, Jorge and Rosero, Edwin and Vasquez, Carlos and Suarez, Luis},
  doi = {10.1109/ETCM.2017.8247530},
  booktitle = {2017 IEEE 2nd Ecuador Technical Chapters Meeting, ETCM 2017}
}
@inproceedings{Rosero-Montalvo2017c,
  title = {Data visualization using interactive dimensionality reduction and improved color-based interaction model},
  type = {inproceedings},
  year = {2017},
  keywords = {Color-based model,Data visualization,Dimensionality reduction,Pairwise similarity},
  pages = {289--298},
  volume = {10338},
  series = {Lecture Notes in Computer Science},
  websites = {https://link.springer.com/chapter/10.1007%2F978-3-319-59773-7_30},
  publisher = {Springer Verlag},
  id = {17ef4734-c6b8-3ac1-a5b5-29a4be7e7dff},
  created = {2021-11-10T17:26:02.270Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-11-10T17:29:44.706Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  private_publication = {false},
  abstract = {This work presents an improved interactive data visualization interface based on a mixture of the outcomes of dimensionality reduction (DR) methods. Broadly, it works as follows: The user can input the mixture weighting factors through a visual and intuitive interface with a primary-light-colors-based model (Red, Green, and Blue). By design, such a mixture is a weighted sum of the color tone. Additionally, the low-dimensional representation space produced by DR methods are graphically depicted using scatter plots powered via an interactive data-driven visualization. To do so, pairwise similarities are calculated and employed to define the graph to simultaneously be drawn over the scatter plot. Our interface enables the user to interactively combine DR methods by the human perception of color, while providing information about the structure of original data. Then, it makes the selection of a DR scheme more intuitive -even for non-expert users.},
  bibtype = {inproceedings},
  author = {Rosero-Montalvo, P. D. and Peña-Unigarro, D. F. and Peluffo, D. H. and Castro-Silva, J. A. and Umaquinga, A. and Rosero-Rosero, E. A.},
  doi = {10.1007/978-3-319-59773-7_30},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}
}
@article{ title = {Auditoría de seguridad informática siguiendo la metodología OSSTMMv3: caso de estudio}, type = {article}, year = {2016}, keywords = {auditoria,canales,controles,limitaciones,ostmm,porosidad,seguridad}, websites = {https://www.researchgate.net/publication/321532783_Auditoria_de_seguridad_informatica_siguiendo_la_metodologia_OSSTMMv3_caso_de_estudio}, id = {4a2ca842-3358-3aa8-836c-d232b932fb01}, created = {2020-12-29T22:52:03.720Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:35.080Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Bracho-Ortega2016}, private_publication = {false}, abstract = {This article explains the methodology followed in computing auditing in terms of security where OSSTMM version 3 was taken as reference methodology and implemented in GAD-Mira. The current methodology allows to measure the security aspects of five different channels such as human, physical, wireless communications, telecommunications, and networking. At the same time, it includes porosity (OpSec), controls, and limitations as three important measures in each channel which allow to calculate and get the numerical values that explain the importance and influence of each item in the computing audit. Additionally, results obtained after the application of the described methodology allowed to understand deficiencies or excesses in terms of security controls that exist in a company or organization in each channel, being an important point to analyze the internal vulnerabilities that need to be solved. Keywords: OSSTMM, porosity, controls, limitations, channels, audit, security.}, bibtype = {article}, author = {Bracho-Ortega, C. and Cuzme-Rodríguez, F. and Pupiales-Yépez, C. and Suárez-Zambrano, L. and Peluffo-Ordóñez, D. and Moreira-Zambrano, C}, journal = {Maskana} }
@inproceedings{ title = {Propuesta de análisis visual de datos en Big Data usando reducción de dimensión interactiva}, type = {inproceedings}, year = {2016}, websites = {https://www.diegopeluffo.com/publicaciones/2016_JornadasFica_BigData.pdf}, id = {daeff509-9dc2-3a34-9ef8-96a486d8449c}, created = {2020-12-29T22:52:04.781Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:36.059Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {A.C.Umaquinga-CriolloD.H.Peluffo-Ordonez2016}, private_publication = {false}, abstract = {En la actualidad se puede evidenciar un crecimiento exponencial del volumen de datos, dando lugar al área emergente denominada Big Data. Paralelamente a este crecimiento, ha aumentado la demanda de herramientas, técnicas y dispositivos para almacenar, transmitir y procesar datos de alta dimensión. La mayoría de metodologías existentes para procesar datos de alta dimensión producen resultados abstractos y no envuelven al usuario en la elección o sintonización las técnicas de análisis. En este trabajo proponemos una metodología de análisis visual de Big Data con principios de interactividad y controlabilidad de forma que usuarios (incluso aquellos no expertos) puedan seleccionar intuitivamente un método de reducción de dimensión para generar representaciones inteligibles para el ser humano.}, bibtype = {inproceedings}, author = {A. C. Umaquinga-Criollo D. H. Peluffo-Ordóñez, M V Cabrera-Álvarez J C Alvarado-Pérez A J Anaya-Isaza}, booktitle = {3ras Jornadas Internacionales FICA} }
@article{ title = {Estudio comparativo de métodos espectrales para reducción de la dimensionalidad: LDA versus PCA . Comparative study between spectral methods for dimension reduction LDA versus PCA.}, type = {article}, year = {2016}, keywords = {Análisis de componentes principales,Análisis discriminante lineal,Aprendizaje de máquina,Clasificación lineal,Clasificación supervisada,Métodos de reducción de la dimensión}, websites = {http://ingenieria.ute.edu.ec/conferencias/index.php/inciscos/2016/paper/view/31}, id = {edb274a9-f02c-396e-80f9-6c5b43c86307}, created = {2020-12-29T22:52:04.796Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:36.004Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Anaya-Isaza2016}, private_publication = {false}, abstract = {Este trabajo presenta un estudio comparativo con métodos de reducción de la dimensión lineal,tales como: Análisis de Componentes Principales &Análisis Discriminante Lineal. El estudio pretende determinar, bajo criterios de objetividad, cuál de estas técnicas obtiene el mejor resultado de separabilidad entre clases. Para la validación experimental se utilizan dos bases de datos, del repositorio científico(UC Irvine Machine Learning Repository), para dar tratamiento a los atributos del data-set en función deconfirmar visualmente la calidad de los resultados obtenidos. Las inmersiones obtenidas son analizadas, para realizar una comparación de resultados del embedimiento representados con RNX(K), que permite evaluar el área bajo la curva, del cual asume una mejor representación en una topología global o localque posteriormente generalos gráficos de visualización en un espacio de menor dimensión, para observar la separabilidad entre clases conservando la estructura global de los datos.}, bibtype = {article}, author = {Anaya-Isaza, Andrés J and Peluffo-Ordoñez, Diego H and Alvarado-Pérez, Juan C and Ivan-Rios, Jorge and Castro-Silva, Juan A and Rosero-Montalvo, Paul D and Peña-Unigarro, Diego F and Salazar-Castro, Jose A. and Umaquinga-Criollo, Ana C}, journal = {INCISCOS 2016 International Conference on Information Systems and Computer Science} }
@article{ title = {Arquitectura, servicios y aplicaciones de Business Intelligence : Revisión de literatura}, type = {article}, year = {2016}, keywords = {big data,business,business intelligence,competitive}, websites = {https://www.diegopeluffo.com/publicaciones/2016_INCISCOS_ArquiteturasBI.pdf}, id = {7a957cd7-fa42-304b-bda9-b2743960f6b9}, created = {2020-12-29T22:52:05.225Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:36.157Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {RoseroMontalvo2016}, private_publication = {false}, abstract = {Business Intelligence (BI) es el conjunto de estrategias y herramientas para analizar gran cantidad de volúmenes de datos con el fin de encontrar patrones o tendencias de consumo de las personas y establecer estrategias de negocio, para lograr este objetivo es necesario contar con servicios y aplicaciones como RealTime BI, Social BI, Cloud BI, BI 3.0, Business Analitics y Mobile BI. Todo el proceso de BI es soportado por diferentes análisis que implementan algoritmos de machine learning en grandes volúmenes y diferentes fuentes de datos, considerado como Big Data. En este trabajo se hace relación a las funcionalidades y los requerimientos necesarios de BI desde un concepto inicial hasta conceptos específicos y herramientas para su implementación.}, bibtype = {article}, author = {Rosero Montalvo, Paul and Rosero Rosero, Edwin and Peluffo-Ordóñez, Diego and Beltrán, Luis}, journal = {ResearchGate} }
% NOTE(review): journal = {N/a} below is a placeholder — the real venue of this 2016
% paper cannot be determined from this file; confirm it (see the linked PDF) and either
% fill in the journal/booktitle or drop the field. Entry left byte-identical otherwise.
@article{ title = {Visualización y métodos kernel: Integrando inteligencia natural y artificial}, type = {article}, year = {2016}, websites = {https://www.diegopeluffo.com/publicaciones/2016_CongresoInternacionallng_Kernel.pdf}, id = {79c5ffea-848c-3412-92f6-341c3bdbb73c}, created = {2020-12-29T22:52:13.338Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:34.725Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Alvarado-Perez2016}, private_publication = {false}, abstract = {Resumen Los enormes volúmenes de datos, generados por la actividad académica, científica, empresarial e industrial, entre muchas más, contienen información muy valiosa, lo que hace necesario desarrollar procesos y técnicas robustas, de validez científica que permitan explorar esas grandes cantidades de datos de manera óptima, con el propósito de obtener información relevante para la generación de nuevo conocimiento y toma de decisiones acertadas. La robustez y altas capacidades de procesamiento computacional de las maquinas modernas son aprovechadas por áreas como la inteligencia artificial que si se integra de forma holística con la inteligencia natural, es decir, si se combina sinérgicamente los métodos sofisticados de análisis de datos con los conocimientos, habilidades y flexibilidad de la razón humana, es posible generar conocimiento de forma más eficaz. 
La visualización de información propone formas eficientes de llevar los resultados generados por los algoritmos a la comprensión humana, la cual permite encontrar tendencias y patrones ocultos de forma visual, los cuales pueden formar la base de modelos predictivos que permitan a los analistas producir nuevas observaciones y consideraciones a partir de los datos existentes, mejorando el desempeño de los sistemas de aprendizaje automático, haciendo más inteligibles los resultados y mejorando la interactividad y controlabilidad por parte del usuario. Sin embargo, la tarea de presentar y/o representar datos de manera comprensible, intuitiva y dinámica, no es un tarea trivial; uno de los mayores problemas que enfrenta la visualización es la alta dimensión de los datos, entendiendo dimensión o como el número de variables o atributos que caracterizan a un objeto. Una solución efectiva son los métodos de reducción de dimensión (RD) que permiten representar los datos originales en alta dimensión en dimensiones inteligibles para el ser humano (2D o 3D). En la actualidad, los métodos kernel representan una buena alternativa de RD debido su versatilidad y fácil implementación en entornos de programación. En este trabajo se presenta una breve descripción y forma de uso de un método generalizado conocido como análisis de componentes principales basado en kernel (KPCA). Palabras claves: Inteligencia artificial, inteligencia natural, kernel PCA, reducción de dimensión. Abstract. The large amount of data generated by different activities -academic, scientific, business and industrial activities, among others-contains meaningful information that allows developing processes and techniques, which have scientific validity to optimally explore such information. Doing so, we get new knowledge to properly make decisions. 
The robustness and high computational processing capabilities of modern machines are used by areas such as artificial intelligence, if holistically integrates with the natural intelligence, in other words, if is synergistically combines sophisticated data analysis methods as well as the knowledge, skills and flexibility of human reasoning, it is possible to discover knowledge in a more effective way. "Information Visualization" is an efficient way to bring the results generated by the algorithms to human understanding in order to find hidden trends and patterns belonging visually to the predictive model, that allow analysts to produce new observations and considerations from existing data, improving the performance of machine learning systems, making it more intelligible results and improving interactivity and controllability by the user. Nonetheless, the task of presenting and/or represent data in an understandable, intuitive and dynamic way, is not a trivial task, one of the biggest problems that visualization faces is the high dimension -being dimension the number of variables or attributes that characterize an object. An effective solution is the dimensionality reduction methods (RD) that can represent the original, high-dimensional data as a space whose dimension is intelligible to humans (2D or 3D). Currently, the kernel methods represent a good alternative because of their versatility and ability to make RD algorithms easily implementable in programming environments. This paper presents a brief description and method of use of a generalized method, so-called kernel principal component analysis (KPCA).}, bibtype = {article}, author = {Alvarado-Pérez, Juan C. and Peluffo-Ordóñez, Diego H. and Theron, Roberto}, journal = {N/a} }
@inproceedings{ title = {Dimensionality reduction for interactive data visualization via a Geo-Desic approach}, type = {inproceedings}, year = {2016}, keywords = {controllability,data information,data visualization,dimensionality reduction,intelligible data,interaction,interface}, pages = {1--6}, websites = {http://ieeexplore.ieee.org/document/7885740/}, month = nov, publisher = {IEEE}, id = {4a0172a1-8deb-3167-836c-5c55679887c7}, created = {2020-12-29T22:52:13.435Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:34.750Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Salazar-Castro2016}, private_publication = {false}, abstract = {This work presents a dimensionality reduction (DR) framework that enables users to perform either the selection or mixture of DR methods by means of an interactive model, here named Geo-Desic approach. Such a model consists of linear combination of kernel-based representations of DR methods, wherein the corresponding coefficients are related to coordinated latitude and longitude inside of the world map. By incorporating the Geo-Desic approach within an interface, the combination may be made easily and intuitively by users -even non-expert ones- fulfilling their criteria and needs, by just picking up points from the map. Experimental results demonstrates the usability and ability of DR methods representation of proposed approach.}, bibtype = {inproceedings}, author = {Salazar-Castro, Jose A. and Pena-Unigarro, Diego and Peluffo-Ordonez, Diego H. and Rosero-Montalvo, Paul D. and Dominguez-Limaico, H. Mauricio and Alvarado-Perez, Juan C. and Theron, Roberto}, doi = {10.1109/LA-CCI.2016.7885740}, booktitle = {2016 IEEE Latin American Conference on Computational Intelligence (LA-CCI)} }
@inproceedings{ title = {Comparison between unipolar and bipolar electrograms for detecting rotor tip from 2D fibrillation model using image fusion. A simulation study}, type = {inproceedings}, year = {2016}, keywords = {Atrial Fibrillation,Electrograms,Image Fusion,Rotor Tip}, pages = {1--6}, websites = {https://ieeexplore.ieee.org/document/7885712/}, month = nov, publisher = {IEEE}, id = {9146ce52-1a1f-37a1-8c3c-4a53d2f7a547}, created = {2020-12-29T22:52:13.541Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:34.742Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Duarte-Salazar2016}, private_publication = {false}, abstract = {The atrial fibrillation (AF) is the most common arrhythmia, which generates the highest costs on clinical systems. Theory of the rotor is one of the most recent approaches to explain the mechanisms that maintain AF. The most promising treatment is the ablation, whose success depends on rotor tip location. In a previous research, the approximate entropy (ApEn) calculated on simulated electrograms from atrial models has shown high capability for detecting the rotor tip, however it needed a human final adjustment. In addition, this technique involves a high computational cost, which is a problem for its effective application. In this study, multiple features maps were generated and different combinations of them were conducted using wavelet image fusion. The rotor tip location when using image fusion, was similar to the results achieved with the methodology based on ApEn, however, our methodology did not require any manual adjustment, and the computational cost was reduced to 85%. This study includes a comparative analysis between unipolar and bipolar electrograms obtained from a simulated 2D model of a human atrial tissue under chronic AF.}, bibtype = {inproceedings}, author = {Duarte-Salazar, Carlos A. and Orozco-Duque, Andres and Tobon, Catalina and Peluffo-Ordonez, Diego H. and Guzman Luna, Jaime A. and Becerra, Miguel A.}, doi = {10.1109/LA-CCI.2016.7885712}, booktitle = {2016 IEEE Latin American Conference on Computational Intelligence (LA-CCI)} }
@inbook{ type = {inbook}, year = {2016}, keywords = {Dimensionality reduction,Generalized kernel formulation,Kernel PCA,Spectral clustering,Support vector machine}, pages = {255--264}, websites = {http://link.springer.com/10.1007/978-3-319-40162-1_28}, id = {b526693a-a1ce-3814-ab3e-9aa22b62c01c}, created = {2020-12-29T22:52:14.769Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:34.600Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Peluffo-Ordonez2016}, private_publication = {false}, abstract = {This paper presents the development of a unified view of spectral clustering and unsupervised dimensionality reduction approaches within a generalized kernel framework. To do so, the authors propose a multipurpose latent variable model in terms of a high-dimensional representation of the input data matrix, which is incorporated into a least-squares support vector machine to yield a generalized optimization problem. After solving it via a primal-dual procedure, the final model results in a versatile projected matrix able to represent data in a low-dimensional space, as well as to provide information about clusters. Specifically, our formulation yields solutions for kernel spectral clustering and weighted-kernel principal component analysis.}, bibtype = {inbook}, author = {Peluffo-Ordóñez, D. H. and Becerra, M. A. and Castro-Ospina, A. E. and Blanco-Valencia, X. and Alvarado-Pérez, J. C. and Therón, R. and Anaya-Isaza, A.}, doi = {10.1007/978-3-319-40162-1_28}, chapter = {On the Relationship Between Dimensionality Reduction and Spectral Clustering from a Kernel Viewpoint}, title = {Advances in Intelligent Systems and Computing} }
@inproceedings{ title = {Interactive visualization methodology of high-dimensional data with a color-based model for dimensionality reduction}, type = {inproceedings}, year = {2016}, pages = {1--7}, websites = {http://ieeexplore.ieee.org/document/7743318/}, month = aug, publisher = {IEEE}, id = {224d230a-d13c-3fea-a320-e4b64045e23a}, created = {2020-12-29T22:52:16.169Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:33.120Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Pena-unigarro2016}, private_publication = {false}, abstract = {Nowadays, a consequence of data overload is that world's technology capacity to collect, communicate, and store large volumes of data is increasing faster than human analysis skills. Such an issue has motivated the development of graphic ways to visually represent and analyze high-dimensional data. Particularly, in this work, we propose a graphical interface that allow the combination of dimensionality reduction (DR) methods using a chromatic model to make data visualization more intelligible for humans. This interface is designed for an easy and interactive use, so that input parameters are given by the user via the selection of RGB values inside a given surface. Proposed interface enables (even non-expert) users to intuitively either select a concrete DR method or carry out a mixture of methods. Experimental results proves the usability of our interface making the selection or configuration of a DR-based visualization an intuitive and interactive task for the user.}, bibtype = {inproceedings}, author = {Pena-unigarro, Diego F. and Salazar-Castro, Jose A. and Peluffo-Ordonez, Diego H. and Rosero-Montalvo, Paul D. and Ona-Rocha, Omar R. and Isaza, Andres A. and Alvarado-Perez, Juan C. and Theron, Roberto}, doi = {10.1109/STSIVA.2016.7743318}, booktitle = {2016 XXI Symposium on Signal Processing, Images and Artificial Vision (STSIVA)} }
@article{ title = {Human sit down position detection using data classification and dimensionality reduction}, type = {article}, year = {2016}, pages = {749--754}, volume = {2}, websites = {https://ieeexplore.ieee.org/document/7750822}, publisher = {ASTES Publishers}, id = {71889da5-6891-3ef4-a493-0aa0bea3f6e7}, created = {2021-03-29T22:13:53.124Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-03-29T22:13:53.124Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Rosero-Montalvo2016749}, source_type = {article}, notes = {cited By 5}, private_publication = {false}, abstract = {The analysis of human sit down position is a research area allows for preventing health physical problems in the back. Many works have proposed systems that detect the sitting position, some open issues are still to be dealt with, such as: Cost, computational load, accuracy, portability, and among others. In this work, we present an alternative approach based on an embedded system to acquire the position-related variables and machine learning techniques, namely dimensionality reduction (DR) and classification. Since the information acquired by sensors is high-dimensional and therefore it might not be saved into embedded system memory, for this reason the system has a DR stage based on principal component analysis (PCA) is performed. Subsequently, the posed detection is carried out by the k-nearest neighbors (KNN) classifier between the matrix stored in the system and new data acquired by pressure and distance sensors. Thus, regarding using the whole data set, the computational cost is decreased by 33 % as well as the data reading is reduced by 10 ms. Then, sitting-pose detection task takes 26 ms, and reaches 75% of accuracy in a 4-trial experiment. © 2017 ASTES Publishers. All rights reserved.}, bibtype = {article}, author = {Rosero-Montalvo, P and Jaramillo, D and Flores, S and Peluffo, D and Alvear, V and Lopez, M}, doi = {10.25046/aj020395}, journal = {Advances in Science, Technology and Engineering Systems}, number = {3} }
@inproceedings{ title = {Different perspectives for kernel spectral clustering: A theoretical study}, type = {inproceedings}, year = {2016}, keywords = {Kernel,spectral clustering,support vector machines}, websites = {https://ieeexplore.ieee.org/document/7750849}, id = {df791591-abdf-3e63-b467-2abbf0bec55b}, created = {2021-03-29T22:13:53.128Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-10-30T02:50:28.954Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Peluffo2016}, private_publication = {false}, abstract = {© 2016 IEEE. Spectral clustering is a suitable technique to deal with problems involving unlabeled clusters and having a complex structure, being kernel-based approaches the most recommended ones. This work aims at demonstrating the relationship between a widely-recommended method, so-named kernel spectral clustering (KSC) and other well-known approaches, namely normalized cut clustering and kernel k-means. Such demonstrations are done by following a primal-dual scheme. Also, we mathematically and experimentally prove the usability of using LS-SVM formulations with a model. Experiments are conducted to assess the clustering performance of KSC and the other considered methods on image segmentation tasks.}, bibtype = {inproceedings}, author = {Peluffo, D. H. and Rosero, P. D. and Pupiales, C. H. and Suarez, L. E. and Jaramillo, E. D. and Maya, E. A. and Michilena, J. R. and Vasquez, C. A.}, doi = {10.1109/ETCM.2016.7750849}, booktitle = {2016 IEEE Ecuador Technical Chapters Meeting, ETCM 2016} }
@inproceedings{ title = {Multi-Labeller classification Method based on Mixture of Classifiers and Genetic Algorithm Optimization}, type = {inproceedings}, year = {2016}, websites = {https://www.semanticscholar.org/paper/Multi-Labeller-classification-Method-based-on-of-Imbajoa-Gustin/86644681587c0cb659cf28d0434f12aa8e6d6425}, id = {843534ca-c2ca-3f43-bf2b-0fdef5a67ebe}, created = {2021-06-08T01:06:19.507Z}, file_attached = {true}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-06-08T01:06:22.834Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {ArciniegasMejia2016}, private_publication = {false}, abstract = {This work presents a new method proposal applied to Multi-Labelers scenarios. This is a situation where labelling individuals in a set of data based on certain characteristics in the process of determining labels to individuals in a set of data based on certain characteristics. Our approach consists in processing a Support Vector Machine classifier to each labelers substantiated on his answers. We formulate a genetic algorithm optimization to obtain a set of weights according to their opinion, in order to penalize each panelist. Finally, their resulting mappings are mixed, and a final classifier is generated, showing to be better than majority vote. For experiments, the well-known Iris database is handled, with multiple simulated artificial labels. The proposed method reaches very good results compared to conventional multi- labeler methods, able to assess the concordance among panelists considering the structure data.}, bibtype = {inproceedings}, author = {Arciniegas Mejía, Andrés and Imbajoa, David and Gustin, Iván and Bolaños, Mauricio and F., Dario and Guasmayan, Fredy and Bravo Montenegro, María and Peluffo, Diego}, booktitle = {INCISCOS 2016 International Conference on Information Systems and Computer Science} }
@inbook{ type = {inbook}, year = {2015}, keywords = {Dimensionality reduction,Generalized kernel,Kernel PCA,Multiple kernel learning}, pages = {626--634}, websites = {http://link.springer.com/10.1007/978-3-319-25751-8_75}, id = {bb298d7f-a578-39f4-9229-58b74ecdad49}, created = {2020-12-29T22:52:03.092Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:35.763Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Peluffo-Ordonez2015c}, private_publication = {false}, abstract = {This work introduces a multiple kernel learning (MKL) approach for selecting and combining different spectralmethods of dimensionality reduction (DR).From a predefined set of kernels representing conventional spectralDRmethods, a generalized kernel is calculated by means of a linear combination of kernel matrices. Coefficients are estimated via a variable ranking aimed at quantifying how much each variable contributes to optimize a variance preservation criterion. All considered kernels are testedwithinakernelPCAframework.Theexperiments are carriedoutover well-known real and artificial data sets. The performance of compared DR approaches is quantified by a scaled version of the average agreement rate between K-ary neighborhoods. Proposed MKL approach exploits the representation ability of every single method to reach a better embedded data for both getting more intelligible visualization and preserving the structure of data.}, bibtype = {inbook}, author = {Peluffo-Ordóñez, Diego Hernán and Castro-Ospina, Andrés Eduardo and Alvarado-Pérez, Juan Carlos and Revelo-Fuelagán, Edgardo Javier}, doi = {10.1007/978-3-319-25751-8_75}, chapter = {Multiple Kernel Learning for Spectral Dimensionality Reduction}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inproceedings{ title = {On the effect of inverse problem weighted solutions for epileptic sources localization}, type = {inproceedings}, year = {2015}, pages = {1--5}, websites = {https://ieeexplore.ieee.org/document/7330448}, month = sep, publisher = {IEEE}, id = {9154e232-5332-3685-8e20-847dd83cfbb3}, created = {2020-12-29T22:52:03.356Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:35.655Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Acosta-Munoz2015}, private_publication = {false}, abstract = {Within the context of epileptic sources localization from electroencephalographic signals, this work presents an exploratory study aimed at studying the effect of channel weighting on the estimation of the inverse problem solution. In this study, we consider two weighting approaches followed from a relevance feature analysis based on variance and energy criteria. Such approaches are compared by measuring the difference between the estimated source activity and the true power of the simulated sources in terms of the Earth mover's distance. Experimental results show that the incorporation of proper weighting factors into a LORETA-driven solution, localization may be improved. As well, the physiological phenomenon of the brain activity is more precisely tracked.}, bibtype = {inproceedings}, author = {Acosta-Munoz, Melissa E. and Paredes-Argoty, Hugo A. and Revelo-Fuelagan, E. Javier and Peluffo-Ordonez, Diego H.}, doi = {10.1109/STSIVA.2015.7330448}, booktitle = {2015 20th Symposium on Signal Processing, Images and Computer Vision (STSIVA)} }
@inproceedings{ title = {Interactive interface for efficient data visualization via a geometric approach}, type = {inproceedings}, year = {2015}, pages = {1--6}, websites = {https://ieeexplore.ieee.org/document/7330397}, month = sep, publisher = {IEEE}, id = {7f3c1e2e-884c-315a-b25a-a03514618837}, created = {2020-12-29T22:52:03.591Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:35.856Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Salazar-Castro2015}, private_publication = {false}, abstract = {Dimensionality reduction (DR) methods represent a suitable alternative to visualizing data. Nonetheless, most of them still lack the properties of interactivity and controllability. In this work, we propose a data visualization interface that allows for user interaction within an interactive framework. Specifically, our interface is based on a mathematic geometric model, which combines DR methods through a weighted sum. Interactivity is provided in the sense that weighting factors are given by the user via the selection of points inside a geometric surface. Then, (even non-expert) users can intuitively either select a concrete DR method or carry out a mixture of methods. Experimental results are obtained using artificial and real datasets, demonstrating the usability and applicability of our interface in DR-based data visualization.}, bibtype = {inproceedings}, author = {Salazar-Castro, J. A. and Rosas-Narvaez, Y. C. and Pantoja, A. D. and Alvarado-Perez, Juan C. and Peluffo-Ordonez, Diego H.}, doi = {10.1109/STSIVA.2015.7330397}, booktitle = {2015 20th Symposium on Signal Processing, Images and Computer Vision (STSIVA)} }
@inproceedings{ title = {Deforming objects via exponential homotopy: A first approach}, type = {inproceedings}, year = {2015}, pages = {1--6}, websites = {https://ieeexplore.ieee.org/document/7330401}, month = sep, publisher = {IEEE}, id = {9ddbd13d-c0ef-31b9-85a9-8f133091b608}, created = {2020-12-29T22:52:04.187Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:35.523Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Alpala2015}, private_publication = {false}, abstract = {This work presents a novel alternative to conventional linear homotopy with suboptimal settings for applications on object deformation. Proposed approach extends the linear mapping to exponential representations that provides smooth transitions when deforming objects while homotopy conditions are fulfilled. As well, we introduce a quality indicator based on the ratio between the coefficients curve of resultant homotopy and those of a non-realistic, reference homotopy. Experimental results are promising and show the applicability of exponential homotopy to interpolating images with soft changes and homotopic geometric objects.}, bibtype = {inproceedings}, author = {Alpala, Luis O. and Peluffo-Ordonez, Diego H. and Gonzalez-Castano, Catalina and Guasmayan, Fredy A.}, doi = {10.1109/STSIVA.2015.7330401}, booktitle = {2015 20th Symposium on Signal Processing, Images and Computer Vision (STSIVA)} }
@inproceedings{
  title               = {Geometrical homotopy for data visualization},
  type                = {inproceedings},
  year                = {2015},
  websites            = {https://dial.uclouvain.be/pr/boreal/object/boreal%3A168996/datastream/PDF_01/view},
  id                  = {af7a3b0f-a2a5-38c7-8820-8f4737aea5e2},
  created             = {2020-12-29T22:52:04.997Z},
  file_attached       = {false},
  profile_id          = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified       = {2021-02-20T22:05:36.155Z},
  read                = {false},
  starred             = {false},
  authored            = {true},
  confirmed           = {true},
  hidden              = {false},
  citation_key        = {Peluffo-Ordonez2015b},
  private_publication = {false},
  abstract            = {This work presents an approach allowing for an interactive visualization of dimensionality reduction outcomes, which is based on an extended view of conventional homotopy. The pairwise functional followed from a simple homotopic function can be incorporated within a geometrical framework in order to yield a biparametric approach able to combine several kernel matrices. Therefore, the users can establish the mixture of kernels in an intuitive fashion by only varying two parameters. Our approach is tested by using kernel alternatives for conventional methods of spectral dimensionality reduction such as multidimensional scalling, locally linear embedding and laplacian eigenmaps. The proposed mixture represents every single dimensionality reduction approach as well as helps users to find a suitable representation of embedded data.},
  bibtype             = {inproceedings},
  author              = {Peluffo-Ordóñez, Diego H. and Alvarado-Pérez, Juan C. and Lee, John A. and Verleysen, Michel},
  booktitle           = {23rd European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning, ESANN 2015 - Proceedings}
}
@incollection{Alvarado-Perez2015a,
  author    = {Alvarado-Pérez, Juan C. and Peluffo-Ordóñez, Diego H.},
  title     = {Artificial and Natural Intelligence Integration},
  booktitle = {Advances in Intelligent Systems and Computing},
  year      = {2015},
  pages     = {167--173},
  keywords  = {Data mining, Machine learning, Visualization},
  doi       = {10.1007/978-3-319-19638-1_19},
  url       = {http://link.springer.com/10.1007/978-3-319-19638-1_19},
  abstract  = {The large amount of data generated by different activities -academic, scientific, business and industrial activities, among others- contains meaningful information that allows developing processes and techniques, which have scientific validity to optimally explore such information. Doing so, we get new knowledge to properly make decisions. Nowadays a new and innovative field is rapidly growing in importance that is Artificial Intelligence, which involves computer processing devices of modern machines and human reasoning. By synergistically combining them -in other words, performing an integration of natural and artificial intelligence-, it is possible to discover knowledge in a more effective way in order to find hidden trends and patterns belonging to the predictive model database. As well, allowing for new observations and considerations from beforehand known data by using data analysis methods as well as the knowledge and skills (of holistic, flexible and parallel type) from human reasoning. This work briefly reviews main basics and recent works on artificial and natural intelligence integration in order to introduce users and researchers on this field integration approaches. As well, key aspects to conceptually compare them are provided.},
}
@article{Lee2015,
  author   = {Lee, John A. and Peluffo-Ordóñez, Diego H. and Verleysen, Michel},
  title    = {Multi-scale similarities in stochastic neighbour embedding: Reducing dimensionality while preserving both local and global structure},
  journal  = {Neurocomputing},
  year     = {2015},
  month    = dec,
  volume   = {169},
  pages    = {246--261},
  keywords = {Data visualisation, Jensen-Shannon divergence, Manifold learning, Nonlinear dimensionality reduction, Stochastic neighbour embedding},
  doi      = {10.1016/j.neucom.2014.12.095},
  url      = {https://linkinghub.elsevier.com/retrieve/pii/S0925231215003641},
  abstract = {Stochastic neighbour embedding (SNE) and its variants are methods of nonlinear dimensionality reduction that involve soft Gaussian neighbourhoods to measure similarities for all pairs of data. In order to build a suitable embedding, these methods try to reproduce in a low-dimensional space the neighbourhoods that are observed in the high-dimensional data space. Previous works have investigated the immunity of such similarities to norm concentration, as well as enhanced cost functions, like sums of Jensen-Shannon divergences. This paper proposes an additional refinement, namely multi-scale similarities, which are averages of soft Gaussian neighbourhoods with exponentially growing bandwidths. Such multi-scale similarities can replace the regular, single-scale neighbourhoods in SNE-like methods. Their objective is then to maximise the embedding quality on all scales, with the best preservation of both local and global neighbourhoods, and also to exempt the user from having to fix a scale arbitrarily. Experiments with several data sets show that the proposed multi-scale approach captures better the structure of data and improves significantly the quality of dimensionality reduction.},
}
@incollection{Peluffo-Ordonez2015a,
  author    = {Peluffo-Ordóñez, D. H. and Alvarado-Pérez, J. C. and Castro-Ospina, A. E.},
  title     = {On the Spectral Clustering for Dynamic Data},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  year      = {2015},
  pages     = {148--155},
  keywords  = {Dynamic data, Kernels, Spectral clustering},
  doi       = {10.1007/978-3-319-18833-1_16},
  url       = {http://link.springer.com/10.1007/978-3-319-18833-1_16},
  abstract  = {Spectral clustering has shown to be a powerful technique for grouping and/or rank data as well as a proper alternative for unlabeled problems. Particularly, it is a suitable alternative when dealing with pattern recognition problems involving highly hardly separable classes. Due to its versatility, applicability and feasibility, this clustering technique results appealing for many applications. Nevertheless, conventional spectral clustering approaches lack the ability to process dynamic or time-varying data. Within a spectral framework, this work presents an overview of clustering techniques as well as their extensions to dynamic data analysis.},
}
@inproceedings{Peluffo-Ordonez2015,
  author    = {Peluffo-Ordonez, D. H. and Rodriguez-Sotelo, J. L. and Revelo-Fuelagan, E. J. and Ospina-Aguirre, C. and Olivard-Tost, G.},
  title     = {Generalized {Bonhoeffer-van der Pol} oscillator for modelling cardiac pulse: Preliminary results},
  booktitle = {2015 IEEE 2nd Colombian Conference on Automatic Control (CCAC)},
  year      = {2015},
  month     = oct,
  pages     = {1--6},
  publisher = {IEEE},
  doi       = {10.1109/CCAC.2015.7345211},
  url       = {http://ieeexplore.ieee.org/document/7345211/},
  abstract  = {This work presents an approach for modelling cardiac pulse from electrocardiographic signals (ECG). We explore the use of the Bonhoeffer-van der Pol (BVP) model-being a generalized version of the van der Pol oscillator - which, under proper parameters, is able to describe action potentials, and it can be then adapted to modelling normal cardiac pulse. Using basics of non-linear dynamics and some algebra, the BVP system response is estimated. To account for an adaptive response for every single heartbeat, we propose a parameter tuning method based on a heuristic search in order to yield responses that morphologically resemble real ECG. This aspect is important since heartbeats have intrinsically strong variability in terms of both shape and length. Experiments are carried out over real ECG from MIT-BIH arrhythmias database. We perform a bifurcation and phase portrait analysis to explore the relationship between non-linear dynamics features and pathology. Preliminary results provided here are promising showing some hints about the ability of non-linear systems modelling ECG to characterize heartbeats and facilitate the classification thereof, being latter very important for diagnosing purposes.},
}
@inproceedings{Rodriguez-Sotelo2015,
  author    = {Rodríguez-Sotelo, Jose Luis and Peluffo-Ordoñez, Diego and Castellanos Dominguez, German},
  editor    = {Romero, Eduardo and Lepore, Natasha},
  title     = {Segment clustering methodology for unsupervised {Holter} recordings analysis},
  booktitle = {10th International Symposium on Medical Information Processing and Analysis},
  year      = {2015},
  month     = jan,
  day       = {28},
  pages     = {92870M},
  doi       = {10.1117/12.2073882},
  url       = {http://proceedings.spiedigitallibrary.org/proceeding.aspx?doi=10.1117/12.2073882},
  abstract  = {Cardiac arrhythmia analysis on Holter recordings is an important issue in clinical settings, however such issue implicitly involves attending other problems related to the large amount of unlabelled data which means a high computational cost. In this work an unsupervised methodology based in a segment framework is presented, which consists of dividing the raw data into a balanced number of segments in order to identify fiducial points, characterize and cluster the heartbeats in each segment separately. The resulting clusters are merged or split according to an assumed criterion of homogeneity. This framework compensates the high computational cost employed in Holter analysis, being possible its implementation for further real time applications. The performance of the method is measure over the records from the MIT/BIH arrhythmia database and achieves high values of sensibility and specificity, taking advantage of database labels, for a broad kind of heartbeats types recommended by the AAMI.},
}
@inproceedings{Alvarado-Perez2015,
  author    = {Alvarado-Perez, Juan C. and Bolanos-Ramirez, Harold and Peluffo-Ordonez, Diego H. and Murillo, S.},
  title     = {Knowledge discovery in databases from a perspective of intelligent information visualization},
  booktitle = {2015 20th Symposium on Signal Processing, Images and Computer Vision (STSIVA)},
  year      = {2015},
  month     = sep,
  pages     = {1--7},
  publisher = {IEEE},
  doi       = {10.1109/STSIVA.2015.7330438},
  url       = {https://ieeexplore.ieee.org/document/7330438},
  abstract  = {This paper reviews some recent and classical, relevant works on information visualization with a special focus on those applied to big data. The central idea dealt in this work relies on how to perform data mining tasks in a visual fashion; that is, using graphical correlation and interaction techniques. The scope of this review encompasses visualization techniques, formal visualization systems, and smart information visualization models. As well, newest approaches consisting of visualization and data mining integration process are explained.},
}
@article{ALVARADO-PEREZ2015,
  author   = {Alvarado-Pérez, Juan Carlos and Peluffo-Ordóñez, Diego H. and Therón, Roberto},
  title    = {Bridging the gap between human knowledge and machine learning},
  journal  = {{ADCAIJ}: Advances in Distributed Computing and Artificial Intelligence Journal},
  year     = {2015},
  month    = oct,
  day      = {6},
  volume   = {4},
  number   = {1},
  pages    = {54},
  doi      = {10.14201/ADCAIJ2015415464},
  url      = {http://campus.usal.es/~revistas_trabajo/index.php/2255-2863/article/view/ADCAIJ2015415464},
  abstract = {Nowadays, great amount of data is being created by several sources from academic, scientific, business and industrial activities. Such data intrinsically contains meaningful information allowing for developing techniques, and have scientific validity to explore the information thereof. In this connection, the aim of artificial intelligence (AI) is getting new knowledge to make decisions properly. AI has taken an important place in scientific and technology development communities, and recently develops computer-based processing devices for modern machines. Under the premise that the feedback provided by human reasoning -which is holistic, flexible and parallel- may enhance the data analysis, the need for the integration of natural and artificial intelligence has emerged. Such an integration makes the process of knowledge discovery more effective, providing the ability to easily find hidden trends and patterns belonging to the database predictive model. As well, allowing for new observations and considerations from beforehand known data by using both data analysis methods and knowledge and skills from human reasoning. In this work, we review main basics and recent works on artificial and natural intelligence integration in order to introduce users and researchers on this emergent field. As well, key aspects to conceptually compare them are provided.},
}
@inproceedings{Castro-Hoyos2015,
  author    = {Castro-Hoyos, Cristian and Peluffo-Ordóñez, Diego Hernán and Rodríguez-Sotelo, Jose Luis and Castellanos-Domínguez, Germán},
  editor    = {Romero, Eduardo and Lepore, Natasha},
  title     = {Effectiveness of morphological and spectral heartbeat characterization on arrhythmia clustering for {Holter} recordings},
  booktitle = {10th International Symposium on Medical Information Processing and Analysis},
  year      = {2015},
  month     = jan,
  day       = {28},
  pages     = {92870A},
  doi       = {10.1117/12.2070686},
  url       = {http://proceedings.spiedigitallibrary.org/proceeding.aspx?doi=10.1117/12.2070686},
  abstract  = {Heartbeat characterization is an important issue in cardiac assistance diagnosis systems. In particular, wide sets of features are commonly used in long term electrocardiographic signals. Then, if such a feature space does not represent properly the arrhythmias to be grouped, classification or clustering process may fail. In this work a suitable feature set for different heartbeat types is studied, involving morphology, representation and time-frequency features. To determine what kind of features generate better clusters, feature selection procedure is used and assessed by means clustering validity measures. Then the feature subset is shown to produce fine clustering that yields into high sensitivity and specificity values for a broad range of heartbeat types.},
}
@inproceedings{Peluffo2014relevance,
  author    = {Peluffo, D. H. and Lee, J. A. and Verleysen, M. and Rodríguez, J. L. and Castellanos-Domínguez, G.},
  title     = {Unsupervised relevance analysis for feature extraction and selection: A distance-based approach for feature relevance},
  booktitle = {ICPRAM 2014 - Proceedings of the 3rd International Conference on Pattern Recognition Applications and Methods},
  year      = {2014},
  keywords  = {Feature extraction, Feature relevance, Feature selection, M-norm, PCA},
  url       = {https://dial.uclouvain.be/pr/boreal/object/boreal:171343},
  abstract  = {The aim of this paper is to propose a new generalized formulation for feature extraction based on distances from a feature relevance point of view. This is done within an unsupervised framework. To do so, it is first outlined the formal concept of feature relevance. Then, a novel feature extraction approach is introduced. Such an approach employs the M-norm as a distance measure. It is demonstrated that under some conditions, this method can readily explain literature methods. As another contribution of this paper, we propose an elegant feature ranking approach for feature selection followed from the spectral analysis of the data variability. Also, we provide a weighted PCA scheme revealing the relationship between feature extraction and feature selection. To assess the behavior of the studied methods within a pattern recognition system, a clustering stage is carried out. Normalized mutual information is used to quantify the quality of resultant clusters. Proposed methods reach comparable results with respect to literature methods.},
}
@inproceedings{Peluffo2014projection,
  author    = {Peluffo, D. H. and Alzate, C. and Suykens, J. A. K. and Castellanos-Dominguez, G.},
  title     = {Optimal data projection for kernel spectral clustering},
  booktitle = {22nd European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning, ESANN 2014 - Proceedings},
  year      = {2014},
  url       = {https://www.elen.ucl.ac.be/Proceedings/esann/esannpdf/es2014-163.pdf},
  abstract  = {Spectral clustering has taken an important place in the context of pattern recognition, being a good alternative to solve problems with non-linearly separable groups. Because of its unsupervised nature, clustering methods are often parametric, requiring then some initial parameters. Thus, clustering performance is greatly dependent on the selection of those initial parameters. Furthermore, tuning such parameters is not an easy task when the initial data representation is not adequate. Here, we propose a new projection for input data to improve the cluster identification within a kernel spectral clustering framework. The proposed projection is done from a feature extraction formulation, in which a generalized distance involving the kernel matrix is used. Data projection shows to be useful for improving the performance of kernel spectral clustering.},
}
@inproceedings{Peluffo2014,
  author    = {Peluffo, D. H. and Lee, J. A. and Verleysen, M.},
  title     = {Recent methods for dimensionality reduction: A brief comparative analysis},
  booktitle = {22nd European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning, ESANN 2014 - Proceedings},
  year      = {2014},
  url       = {https://dial.uclouvain.be/pr/boreal/object/boreal:171353},
  abstract  = {Dimensionality reduction is a key stage for both the design of a pattern recognition system or data visualization. Recently, there has been a increasing interest in those methods aimed at preserving the data topology. Among them, Laplacian eigenmaps (LE) and stochastic neighbour embedding (SNE) are the most representative. In this work, we present a brief comparative among very recent methods being alternatives to LE and SNE. Comparisons are done mainly on two aspects: algorithm implementation, and complexity. Also, relations between methods are depicted. The goal of this work is providing researches on this field with some discussion as well as criteria decision to choose a method according to the user's needs and/or keeping a good trade-off between performance and required processing time.},
}
@article{Rodriguez-Sotelo2014,
  author   = {Rodríguez-Sotelo, Jose and Osorio-Forero, Alejandro and Jiménez-Rodríguez, Alejandro and Cuesta-Frau, David and Cirugeda-Roldán, Eva and Peluffo, Diego},
  title    = {Automatic Sleep Stages Classification Using {EEG} Entropy Features and Unsupervised Pattern Analysis Techniques},
  journal  = {Entropy},
  year     = {2014},
  month    = dec,
  day      = {17},
  volume   = {16},
  number   = {12},
  pages    = {6573--6589},
  keywords = {Clustering, Feature extraction, Feature selection, Q-α, Relevance analysis, Signal entropy, Sleep stages},
  doi      = {10.3390/e16126573},
  url      = {http://www.mdpi.com/1099-4300/16/12/6573},
  abstract = {Sleep is a growing area of research interest in medicine and neuroscience. Actually, one major concern is to find a correlation between several physiologic variables and sleep stages. There is a scientific agreement on the characteristics of the five stages of human sleep, based on EEG analysis. Nevertheless, manual stage classification is still the most widely used approach. This work proposes a new automatic sleep classification method based on unsupervised feature classification algorithms recently developed, and on EEG entropy measures. This scheme extracts entropy metrics from EEG records to obtain a feature vector. Then, these features are optimized in terms of relevance using the Q-α algorithm. Finally, the resulting set of features is entered into a clustering procedure to obtain a final segmentation of the sleep stages. The proposed method reached up to an average of 80% correctly classified stages for each patient separately while keeping the computational cost low.},
}
@inproceedings{Peluffo-Ordonez2014a,
  author    = {Peluffo-Ordonez, Diego Hernan and Revelo-Fuelagan, Edgardo Javier},
  title     = {Novel spectral characteristics of the electrical current waveform to quantifying power quality on {LED} lamps},
  booktitle = {2014 XIX Symposium on Image, Signal Processing and Artificial Vision},
  year      = {2014},
  month     = sep,
  pages     = {1--5},
  publisher = {IEEE},
  keywords  = {Power quality, lighting systems, periodogram, power factor corrector, power spectral density},
  doi       = {10.1109/STSIVA.2014.7010182},
  url       = {http://ieeexplore.ieee.org/document/7010182/},
  abstract  = {LED lamps are widely used in household, nonetheless they are still non-linear. Therefore, LEDs need a power supply system to correcting their operation, which introduces nonlinearities into the electrical grid and distortions on waveform. Then analysis and quantification of electrical signals is becoming a key issue. This work presents a spectral analysis of analysis of electric current signal in LEDs lamps which yields a novel collection of characteristics and measures to quantify the waveform quality. In particular, periodogram and Fourier transform are considered. For experiments, two circuits are considered: one that corresponds to the commercial LEDs lamp connected to AC source and another one incorporating a power factor corrector. Experimentally, the usefulness and applicability of proposed characteristics is proved.},
}
@incollection{Peluffo-Ordonez2014d,
  author    = {Peluffo-Ordóñez, Diego H. and Lee, John A. and Verleysen, Michel},
  title     = {Short Review of Dimensionality Reduction Methods Based on Stochastic Neighbour Embedding},
  booktitle = {Advances in Intelligent Systems and Computing},
  year      = {2014},
  pages     = {65--74},
  keywords  = {Dimensionality reduction, divergences, similarity, stochastic neighbor embedding},
  doi       = {10.1007/978-3-319-07695-9_6},
  url       = {http://link.springer.com/10.1007/978-3-319-07695-9_6},
  abstract  = {Dimensionality reduction methods aimed at preserving the data topology have shown to be suitable for reaching high-quality embedded data. In particular, those based on divergences such as stochastic neighbour embedding (SNE). The big advantage of SNE and its variants is that the neighbor preservation is done by optimizing the similarities in both high- and low-dimensional space. This work presents a brief review of SNE-based methods. Also, a comparative analysis of the considered methods is provided, which is done on important aspects such as algorithm implementation, relationship between methods, and performance. The aim of this paper is to investigate recent alternatives to SNE as well as to provide substantial results and discussion to compare them.},
}
@inproceedings{Lee2014,
  author    = {Lee, John A. and Peluffo-Ordóñez, Diego H. and Verleysen, Michel},
  title     = {Multiscale stochastic neighbor embedding: Towards parameter-free dimensionality reduction},
  booktitle = {22nd European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning, ESANN 2014 - Proceedings},
  year      = {2014},
  url       = {https://www.elen.ucl.ac.be/Proceedings/esann/esannpdf/es2014-64.pdf},
  abstract  = {Stochastic neighbor embedding (SNE) is a method of dimensionality reduction that involves softmax similarities measured between all pairs of data points. To build a suitable embedding, SNE tries to reproduce in a low-dimensional space the similarities that are observed in the high-dimensional data space. Previous work has investigated the immunity of such similarities to norm concentration, as well as enhanced cost functions. This paper proposes an additional refinement, in the form of multiscale similarities, namely averages of softmax ratios with decreasing bandwidths. The objective is to maximize the embedding quality at all scales, with a better preservation of both local and global neighborhoods, and also to exempt the user from having to fix a scale arbitrarily. Experiments on several data sets show that this multiscale version of SNE, combined with an appropriate cost function (sum of Jensen-Shannon divergences), outperforms all previous variants of SNE.},
}
@inproceedings{Peluffo-Ordonez2014,
  author    = {Peluffo-Ordonez, Diego H. and Lee, John Aldo and Verleysen, Michel},
  title     = {Generalized kernel framework for unsupervised spectral methods of dimensionality reduction},
  booktitle = {2014 IEEE Symposium on Computational Intelligence and Data Mining (CIDM)},
  year      = {2014},
  month     = dec,
  pages     = {171--177},
  publisher = {IEEE},
  doi       = {10.1109/CIDM.2014.7008664},
  url       = {http://ieeexplore.ieee.org/document/7008664/},
  abstract  = {This work introduces a generalized kernel perspective for spectral dimensionality reduction approaches. Firstly, an elegant matrix view of kernel principal component analysis (PCA) is described. We show the relationship between kernel PCA, and conventional PCA using a parametric distance. Secondly, we introduce a weighted kernel PCA framework followed from least-squares support vector machines (LS-SVM). This approach starts with a latent variable that allows to write a relaxed LS-SVM problem. Such a problem is addressed by a primal-dual formulation. As a result, we provide kernel alternatives to spectral methods for dimensionality reduction such as multidimensional scaling, locally linear embedding, and laplacian eigenmaps; as well as a versatile framework to explain weighted PCA approaches. Experimentally, we prove that the incorporation of a SVM model improves the performance of kernel PCA.},
}
@inproceedings{Peluffo-Ordonez2014c,
  author    = {Peluffo-Ordóñez, D. H. and Murillo-Rendón, S. and Arias-Londoño, J. D. and Castellanos-Domínguez, G.},
  title     = {A multi-class extension for multi-labeler support vector machines},
  booktitle = {22nd European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning, ESANN 2014 - Proceedings},
  year      = {2014},
  url       = {https://www.elen.ucl.ac.be/Proceedings/esann/esannpdf/es2014-169.pdf},
  abstract  = {In recent years, there has been an increasing interest in the design of pattern recognition systems able to deal with labels coming from multiple sources. To avoid bias during the learning process, in some applications it is strongly recommended to learn from a set of panelists or experts instead of only one. In particular, two aspects are of interest, namely: discriminating between confident and unconfident labelers, and determining the suitable ground truth. This work presents an extension of a previous work, which consists of a generalization of the two class case via a modified one-against-all approach. This approach uses modified classifiers able to learn from multi-labeler settings. This is done within a soft-margin support vector machine framework. Proposed method provides ranking values for panelist as well as an estimate of the ground truth.},
}
@incollection{Peluffo-Ordonez2014b,
  author    = {Peluffo-Ordóñez, D. H. and Castro-Hoyos, C. and Acosta-Medina, Carlos D. and Castellanos-Domínguez, Germán},
  title     = {Quadratic Problem Formulation with Linear Constraints for Normalized Cut Clustering},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  year      = {2014},
  pages     = {408--415},
  doi       = {10.1007/978-3-319-12568-8_50},
  url       = {http://link.springer.com/10.1007/978-3-319-12568-8_50},
  abstract  = {This work describes a novel quadratic formulation for solving the normalized cuts-based clustering problem as an alternative to spectral clustering approaches. Such formulation is done by establishing simple and suitable constraints, which are further relaxed in order to write a quadratic functional with linear constraints. As a meaningful result of this work, we accomplish a deterministic solution instead of using a heuristic search. Our method reaches comparable performance against conventional spectral methods, but spending significantly lower processing time.},
}
@inproceedings{ title = {Support vector machine-based approach for multi-labelers problems}, type = {inproceedings}, year = {2013}, websites = {https://www.elen.ucl.ac.be/Proceedings/esann/esannpdf/es2013-118.pdf}, id = {2e31c347-8ac9-3e66-b4b0-e9814b24a253}, created = {2020-12-09T05:20:42.419Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-03-29T22:36:50.766Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {We propose a first approach to quantify the panelist's labeling generalizing a soft-margin support vector machine classifier to multi-labeler analysis. Our approach consists of formulating a quadratic optimization problem instead of using a heuristic search algorithm. We determine penalty factors for each panelist by incorporating a linear combination in the primal formulation. Solution is obtained on a dual formulation using quadratic programming. For experiments, the well-known Iris with multiple simulated artificial labels and a multi-label speech database are employed. Obtained penalty factors are compared with both standard supervised and non-supervised measurements. Promising results show that proposed method is able to assess the concordance among panelists considering the structure of data.}, bibtype = {inproceedings}, author = {Murillo, S. and Peluffo, D.H. and Castellanos, G.}, booktitle = {ESANN 2013 proceedings, 21st European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning} }
@inbook{ type = {inbook}, year = {2013}, keywords = {Kernels,motion tracking,spectral clustering}, pages = {264-273}, websites = {http://link.springer.com/10.1007/978-3-642-38637-4_27}, id = {40bd2874-4ff8-30e8-8e76-267e4ac37156}, created = {2020-12-29T22:52:03.092Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:35.630Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Peluffo-Ordonez2013b}, private_publication = {false}, abstract = {This work introduces a first approach to track moving-samples or frames matching each sample to a single meaningful value. This is done by combining the kernel spectral clustering with a feature relevance procedure that is extended to rank the frames in order to track the dynamic behavior along a frame sequence. We pose an optimization problem to determine the tracking vector, which is solved by the eigenvectors given by the clustering method. Unsupervised approaches are preferred since, for motion tracking applications, labeling is unavailable in practice. For experiments, two databases are considered: Motion Caption and an artificial three-moving Gaussian in which the mean changes per frame. Proposed clustering is compared with kernel K-means and Min-Cuts by using normalized mutual information and adjusted random index as metrics. Results are promising showing clearly that there exists a direct relationship between the proposed tracking vector and the dynamic behavior. © 2013 Springer-Verlag.}, bibtype = {inbook}, author = {Peluffo-Ordóñez, D. and García-Vega, S. and Castellanos-Domínguez, C. G.}, doi = {10.1007/978-3-642-38637-4_27}, chapter = {Kernel Spectral Clustering for Motion Tracking: A First Approach}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inbook{ type = {inbook}, year = {2013}, keywords = {Dynamic data,Kernels,Spectral clustering,Support vector machines}, pages = {238-245}, websites = {http://link.springer.com/10.1007/978-3-642-41822-8_30}, id = {1e804a7a-4483-32ac-9f46-2f168dabf867}, created = {2020-12-29T22:52:04.572Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:36.741Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Peluffo-Ordonez2013c}, private_publication = {false}, abstract = {This paper introduces a novel spectral clustering approach based on kernels to analyze time-varying data. Our approach is developed within a multiple kernel learning framework, which, in this case is assumed as a linear combination model. To perform such linear combination, weighting factors are estimated by a ranking procedure yielding a vector calculated from the eigenvectors-derived-clustering-method. Particularly, the method named kernel spectral clustering is considered. Proposed method is compared to some conventional spectral clustering techniques, namely, kernel k-means and min-cuts. Standard k-means as well. The clustering performance is quantified by the normalized mutual information and Adjusted Rand Index measures. Experimental results prove that proposed approach is an useful tool for both tracking and clustering dynamic data, being able to manage applications for human motion analysis. © Springer-Verlag 2013.}, bibtype = {inbook}, author = {Peluffo-Ordóñez, Diego Hernán and García-Vega, Sergio and Álvarez-Meza, Andrés Marino and Castellanos-Domínguez, César Germán}, doi = {10.1007/978-3-642-41822-8_30}, chapter = {Kernel Spectral Clustering for Dynamic Data}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inproceedings{ title = {Novel heuristic search for ventricular arrhythmia detection using normalized cut clustering}, type = {inproceedings}, year = {2013}, keywords = {Cardiac arrhythmia,heuristic search,kernel density estimator,normalized cut clustering}, pages = {7076-7079}, websites = {http://ieeexplore.ieee.org/document/6611188/}, month = {7}, publisher = {IEEE}, id = {d5462696-d8e5-3d0f-9083-7092f141cd4c}, created = {2020-12-29T22:52:11.941Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:33.910Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Castro-Ospina2013}, private_publication = {false}, abstract = {Processing of the long-term ECG Holter recordings for accurate arrhythmia detection is a problem that has been addressed in several approaches. However, there is not an outright method for heartbeat classification able to handle problems such as the large amount of data and highly unbalanced classes. This work introduces a heuristic-search-based clustering to discriminate among ventricular cardiac arrhythmias in Holter recordings. The proposed method is posed under the normalized cut criterion, which iteratively seeks for the nodes to be grouped into the same cluster. Searching procedure is carried out in accordance to the introduced maximum similarity value. Since our approach is unsupervised, a procedure for setting the initial algorithm parameters is proposed by fixing the initial nodes using a kernel density estimator. Results are obtained from MIT/BIH arrhythmia database providing heartbeat labelling. As a result, proposed heuristic-search-based clustering shows an adequate performance, even in the presence of strong unbalanced classes. © 2013 IEEE.}, bibtype = {inproceedings}, author = {Castro-Ospina, A. E. and Castro-Hoyos, C. and Peluffo-Ordonez, D. 
and Castellanos-Dominguez, G.}, doi = {10.1109/EMBC.2013.6611188}, booktitle = {2013 35th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)} }
@inproceedings{ title = {Kernel spectral clustering for dynamic data using multiple kernel learning}, type = {inproceedings}, year = {2013}, pages = {1-6}, websites = {http://ieeexplore.ieee.org/document/6706858/}, month = {8}, publisher = {IEEE}, id = {928de0e3-db4a-3149-9206-bfccb46b04c8}, created = {2020-12-29T22:52:13.238Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:33.705Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Peluffo-Ordonez2013}, private_publication = {false}, abstract = {In this paper we propose a kernel spectral clustering-based technique to catch the different regimes experienced by a time-varying system. Our method is based on a multiple kernel learning approach, which is a linear combination of kernels. The calculation of the linear combination coefficients is done by determining a ranking vector that quantifies the overall dynamical behavior of the analyzed data sequence over-time. This vector can be calculated from the eigenvectors provided by the the solution of the kernel spectral clustering problem. We apply the proposed technique to a trial from the Graphics Lab Motion Capture Database from Carnegie Mellon University, as well as to a synthetic example, namely three moving Gaussian clouds. For comparison purposes, some conventional spectral clustering techniques are also considered, namely, kernel k-means and min-cuts. Also, standard k-means. The normalized mutual information and adjusted random index metrics are used to quantify the clustering performance. Results show the usefulness of proposed technique to track dynamic data, even being able to detect hidden objects. © 2013 IEEE.}, bibtype = {inproceedings}, author = {Peluffo-Ordonez, D. and Garcia-Vega, S. and Langone, R. and Suykens, J. A. K. 
and Castellanos-Dominguez, G.}, doi = {10.1109/IJCNN.2013.6706858}, booktitle = {The 2013 International Joint Conference on Neural Networks (IJCNN)} }
@inproceedings{ title = {Normalized cuts clustering with prior knowledge and a pre-clustering stage}, type = {inproceedings}, year = {2013}, websites = {https://www.elen.ucl.ac.be/Proceedings/esann/esannpdf/es2013-90.pdf}, id = {27512f93-f3d5-3a89-ad7f-48741dbc0b4d}, created = {2020-12-29T22:52:16.342Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:33.237Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Peluffo-Ordonez2013a}, private_publication = {false}, abstract = {Clustering is of interest in cases when data are not labeled enough and a prior training stage is unfeasible. In particular, spectral clustering based on graph partitioning is of interest to solve problems with highly non-linearly separable classes. However, spectral methods, such as the well-known normalized cuts, involve the computation of eigenvectors that is a highly time-consuming task in case of large data. In this work, we propose an alternative to solve the normalized cuts problem for clustering, achieving same results as conventional spectral methods but spending less processing time. Our method consists of a heuristic search to find the best cluster binary indicator matrix, in such a way that each pair of nodes with greater similarity value are first grouped and the remaining nodes are clustered following a heuristic algorithm to search into the similarity-based representation space. The proposed method is tested over a public domain image data set. Results show that our method reaches comparable results with a lower computational cost.}, bibtype = {inproceedings}, author = {Peluffo-Ordoñez, D. and Castro-Ospina, A. E. and Chavez-Chamorro, D. and Acosta-Medina, C. D. and Castellanos-Dominguez, G.}, booktitle = {ESANN 2013 proceedings, 21st European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning} }
@inbook{ type = {inbook}, year = {2013}, keywords = {Bi-class classifier,multi-labeler analysis,quadratic programming,support vector machines}, pages = {274-282}, websites = {http://link.springer.com/10.1007/978-3-642-38637-4_28}, id = {6e0902cb-570c-3838-9cec-ec045f8f8fbe}, created = {2020-12-29T22:52:16.523Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:33.334Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Murillo-Rendon2013}, private_publication = {false}, abstract = {This work presents an approach to quantify the quality of panelist's labeling by means of a soft-margin support vector machine formulation for a bi-class classifier, which is extended to multi-labeler analysis. This approach starts with a formulation of an objective function to determine a suitable hyperplane of decision for classification tasks. Then, this formulation is expressed in a soft-margin form by introducing some slack variables. Finally, we determine penalty factors for each panelist. To this end, a panelist's effect term is incorporated in the primal soft-margin problem. Such problem is solved by deriving a dual formulation as a quadratic programming problem. For experiments, the well-known Iris database is employed by simulating multiple artificial labels. The obtained penalty factors are compared with standard supervised measures calculated from confusion matrix. The results show that penalty factors are related to the nature of data, allowing to properly quantify the concordance among panelists. © 2013 Springer-Verlag.}, bibtype = {inbook}, author = {Murillo-Rendón, S. and Peluffo-Ordóñez, D. and Arias-Londoño, J. D. and Castellanos-Domínguez, C. 
G.}, doi = {10.1007/978-3-642-38637-4_28}, chapter = {Multi-labeler Analysis for Bi-class Problems Based on Soft-Margin Support Vector Machines}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inproceedings{ title = {Constrained affinity matrix for spectral clustering: A basic semi-supervised extension}, type = {inproceedings}, year = {2012}, keywords = {Affinity matrix,kernel methods,prior information,semi-supervised analysis,spectral clustering}, pages = {242-245}, websites = {http://ieeexplore.ieee.org/document/6340590/}, month = {9}, publisher = {IEEE}, id = {33fb2f78-179e-34a4-bd89-ab269afab069}, created = {2020-12-09T05:26:50.440Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-03-29T22:51:18.685Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {Spectral clustering has represented a good alternative in digital signal processing and pattern recognition; however a decision concerning the affinity functions among data is still an issue. In this work it is presented an extended version of a traditional multiclass spectral clustering method which employs prior information about the classified data into the affinity matrixes aiming to maintain the background relation that might be lost in the traditional manner, that is using a scaled exponential affinity matrix constrained by weighting the data according to some prior knowledge and via k-way normalized cuts clustering, results in a semi-supervised methodology of traditional spectral clustering. Test was performed over toy data classification and image segmentation and evaluated with unsupervised performance measures (group coherence, fisher criteria and silhouette). © 2012 IEEE.}, bibtype = {inproceedings}, author = {Castro-Hoyos, C. and Peluffo, D. H. and Castellanos, C. G.}, doi = {10.1109/STSIVA.2012.6340590}, booktitle = {2012 XVII Symposium of Image, Signal Processing, and Artificial Vision (STSIVA)} }
@inproceedings{ title = {Numerical investigation of low level OSNR estimation based on gaussian fitting and non-linear least squares on AAH in noisy optical communication links}, type = {inproceedings}, year = {2012}, keywords = {Asynchronous amplitude histograms(AAH),gaussian fitting,non-linear least squares(NLLS),optical performance monitoring systems(OPMS),optical signal to noise ratio(OSNR)}, pages = {1-6}, websites = {http://ieeexplore.ieee.org/document/6233657/}, month = {5}, publisher = {IEEE}, id = {40b7d11d-35a5-3e66-8fe5-d5d9e648e6bc}, created = {2020-12-09T05:26:51.113Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-03-29T22:51:18.679Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {An extended digital estimation approach of optical signal to noise ratio (OSNR) based on statistical analysis of asynchronous amplitude histograms (AAH) of the received optical signal is presented and numerically investigated. Accurate OSNR estimation on highly noisy optical transmission link is achieved. Furthermore, the proposed OSNR estimation approach may be digitally adjusted to any Cartesian modulation format such as multilevel phase shift keying and quadrature amplitude modulated optical signals without degrading estimation accuracy. The OSNR estimation methodology is based on kernel density estimation with Gaussian kernels and non-linear least-squares regression (NLLS). Heuristic searches are no longer needed and the process becomes more reliable and robust. Reported results show accurate OSNR estimation with less than 15% error estimation on the simulated OSNR value for different signal modulation formats, exhibiting a more confident estimation system, with comparable results among formats because of the statistical nature histogram instead of the regular counting bins histogram. 
© 2012 IEEE.}, bibtype = {inproceedings}, author = {Castro H., Cristian and Peluffo O., Diego H. and Diaz, Oscar Marino and Guerrero G., Neil}, doi = {10.1109/ColComCon.2012.6233657}, booktitle = {2012 IEEE Colombian Communications Conference (COLCOM)} }
@inbook{ type = {inbook}, year = {2012}, pages = {130-137}, websites = {http://link.springer.com/10.1007/978-3-642-33275-3_16}, id = {6a656208-fdd7-362c-b9c1-40a444afc60e}, created = {2020-12-29T22:52:12.437Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:33.421Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Peluffo-Ordonez2012a}, private_publication = {false}, abstract = {In this work, we present an improved multi-class spectral clustering (MCSC) that represents an alternative to the standard k-way normalized clustering, avoiding the use of an iterative algorithm for tuning the orthogonal matrix rotation. The performance of proposed method is compared with the conventional MCSC and k-means in terms of different clustering quality indicators. Results are accomplished on commonly used toy data sets with hardly separable classes, as well as on an image segmentation database. In addition, as a clustering indicator, a novel unsupervised measure is introduced to quantify the performance of the proposed method. The proposed method spends lower processing time than conventional spectral clustering approaches. © 2012 Springer-Verlag.}, bibtype = {inbook}, author = {Peluffo-Ordóñez, Diego Hernán and Acosta-Medina, Carlos Daniel and Castellanos-Domínguez, César Germán}, doi = {10.1007/978-3-642-33275-3_16}, chapter = {An Improved Multi-Class Spectral Clustering Based on Normalized Cuts}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@article{ title = {Unsupervised feature relevance analysis applied to improve ECG heartbeat clustering}, type = {article}, year = {2012}, keywords = {Electrocardiogram analysis,Feature selection,Heartbeat classification,Q-α algorithm,Relevance analysis}, pages = {250-261}, volume = {108}, websites = {https://linkinghub.elsevier.com/retrieve/pii/S0169260712001095}, month = {10}, id = {3c56720e-8a87-36b9-820b-a490e01cb4c0}, created = {2020-12-29T22:52:13.335Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:34.660Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Rodriguez-Sotelo2012}, private_publication = {false}, abstract = {The computer-assisted analysis of biomedical records has become an essential tool in clinical settings. However, current devices provide a growing amount of data that often exceeds the processing capacity of normal computers. As this amount of information rises, new demands for more efficient data extracting methods appear.This paper addresses the task of data mining in physiological records using a feature selection scheme. An unsupervised method based on relevance analysis is described. This scheme uses a least-squares optimization of the input feature matrix in a single iteration. The output of the algorithm is a feature weighting vector.The performance of the method was assessed using a heartbeat clustering test on real ECG records. The quantitative cluster validity measures yielded a correctly classified heartbeat rate of 98.69% (specificity), 85.88% (sensitivity) and 95.04% (general clustering performance), which is even higher than the performance achieved by other similar ECG clustering studies. The number of features was reduced on average from 100 to 18, and the temporal cost was a 43% lower than in previous ECG clustering schemes. 
© 2012 Elsevier Ireland Ltd.}, bibtype = {article}, author = {Rodríguez-Sotelo, J.L. and Peluffo-Ordoñez, D. and Cuesta-Frau, D. and Castellanos-Domínguez, G.}, doi = {10.1016/j.cmpb.2012.04.007}, journal = {Computer Methods and Programs in Biomedicine}, number = {1} }
@inbook{ type = {inbook}, year = {2012}, keywords = {Kernel learning,Relevance analysis,Spectral clustering}, pages = {501-510}, websites = {http://link.springer.com/10.1007/978-3-642-34654-5_51}, id = {8682e3d1-4ad3-3461-9125-2dc6b61a2f67}, created = {2020-12-29T22:52:14.345Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:34.423Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Molina-Giraldo2012}, private_publication = {false}, abstract = {In this paper an automatic image segmentation methodology based on Multiple Kernel Learning (MKL) is proposed. In this regard, we compute some image features for each input pixel, and then combine such features by means of a MKL framework. We automatically fix the weights of the MKL approach based on a relevance analysis over the original input feature space. Moreover, an unsupervised image segmentation measure is used as a tool to establish the employed kernel free parameter. A Kernel Kmeans algorithm is used as spectral clustering method to segment a given image. Experiments are carried out aiming to test the efficiency of the incorporation of weighted feature information into clustering procedure, and to compare the performance against state of the art algorithms, using a supervised image segmentation measure. Attained results show that our approach is able to compute a meaningful segmentations, demonstrating its capability to support further vision computer applications. © Springer-Verlag Berlin Heidelberg 2012.}, bibtype = {inbook}, author = {Molina-Giraldo, S. and Álvarez-Meza, A. M. and Peluffo-Ordoñez, D. H. 
and Castellanos-Domínguez, G.}, doi = {10.1007/978-3-642-34654-5_51}, chapter = {Image Segmentation Based on Multi-Kernel Learning and Feature Relevance Analysis}, title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inproceedings{ title = {Effect of latency on clustering of P300 recordings for ADHD discrimination}, type = {inproceedings}, year = {2012}, pages = {5202-5205}, websites = {http://ieeexplore.ieee.org/document/6347166/}, month = {8}, publisher = {IEEE}, id = {ea4342f9-d52b-3e9e-86f8-44b8aa7ea164}, created = {2020-12-29T22:52:16.862Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:32.758Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Peluffo-Ordonez2012}, private_publication = {false}, abstract = {This paper is focused on testing the latency contribution as regards the quality of formed groups for discriminating between healthy and attention deficit hyperactivity disorder children. To this end, two different cases are considered: nonaligned original recordings and aligned signals according to P300 position. For latter case, a novel approach to conduct time location of P300 component is introduced, which is based on derivative of event-related potential signals. The used database holds event-related potentials registered in auditory and visual oddball paradigm. Several experiments are carried out testing both configurations of considered data matrix. For grouping input data matrices, the k-means clustering technique is employed. To assess the quality of formed clusters and the relevance for clustering of latency-based features, relative values of distances between centroids and data points are computed in order to apprise separability and compactness of estimated clusters. Experimental results show that time localization of P300 component is not a decisive feature in formation of compact and well-defined groups within a discrimination framework for two considered data classes under certain conditions. © 2012 IEEE.}, bibtype = {inproceedings}, author = {Peluffo-Ordonez, D. H. and Martinez-Vargas, J. D. 
and Castellanos-Dominguez, G.}, doi = {10.1109/EMBC.2012.6347166}, booktitle = {2012 Annual International Conference of the IEEE Engineering in Medicine and Biology Society} }
@article{ title = {Weighted time series analysis for electroencephalographic source localization}, type = {article}, year = {2012}, keywords = {Brain mapping,Inverse problem,Weighting matrix}, websites = {http://www.scielo.org.co/scielo.php?script=sci_arttext&pid=S0012-73532012000600008}, id = {abca9b4c-308a-3dd3-add0-25a32dd630b9}, created = {2020-12-29T22:52:16.902Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-02-20T22:05:32.730Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, citation_key = {Giraldo2012}, private_publication = {false}, abstract = {This paper presents a new method to estimate neural activity from electroencephalographic signals using a weighted time series analysis. The method considers a physiologically based linear model that takes both spatial and temporal dynamics into account and a weighting stage to modify the assumptions of the model from observations. The calculated weighting matrix is included in the cost function used to solve the dynamic inverse problem, and therefore in the Kalman filter formulation. In this way, a weighted Kalman filtering approach is proposed including a preponderance matrix. The filter's performance (in terms of localization error) is analyzed for several SNRs. The optimal performance is achieved using the linear model with a weighting matrix computed by an inner product method.}, bibtype = {article}, author = {Giraldo, Eduardo and Peluffo-Ordoñez, Diego and Castellanos-Domínguez, Germán}, journal = {DYNA} }
@inproceedings{ title = {Spectral analysis of electric current in LEDs Lamps}, type = {inproceedings}, year = {2011}, keywords = {compact fluorescent lamps,fourier transform,power spectral density,spectral analysis}, id = {fcbd27ba-767d-3c08-b944-fbe4c185495a}, created = {2021-11-14T03:44:28.048Z}, file_attached = {true}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-11-14T03:44:30.552Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {This work presents an analysis of electric current signal in compact fluorescent lamps (CFLs). Electrical signals are measured in two circuits, one that corresponds to the CFL shunt connected with the AC source and another one that incorporates a control system into the CFL. Such control system works as a power factor correction (PFC) and is designed by employing a boost converter and a current controller. Signals are analyzed in terms of frequency-based representations oriented to estimate the power spectral density (PSD). In this study, three approaches are employed: Fourier transform, periodogram and a window-based PSD. The goal of this work is to show that more complex PSD estimation methods can provide useful information for studying the quality energy in electric power systems. Proposed spectral analysis represents an alternative to traditional approaches.}, bibtype = {inproceedings}, author = {Peluffo-Ordóñez, D. H. and Revelo-Fuelagan, E. J. and Alarcón-Lancheros, G. A. and Diaz-Betancourt, O. M.}, booktitle = {SICEL 2011} }
@inproceedings{ title = {ADHD identification based on a linear projection and clustering}, type = {inproceedings}, year = {2011}, keywords = {adhd,clustering,erp,linear projection}, websites = {https://www.researchgate.net/publication/303722482_ADHD_identification_based_on_a_linear_projection_and_clustering}, id = {b50c0005-a289-323f-8b0b-dfe069795528}, created = {2021-11-14T03:44:28.066Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-11-14T03:48:49.803Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {Event-related potentials (ERPs) are electrical signals from brain generated as a response to an external sensorial stimulus. This kind of signals are widely used to diagnose neurological disorders, such as Attention-deficit hyperactivity disorder (ADHD). In this paper, a novel methodology for ADHD discrimination is proposed, which consists of obtaining a new data representation by means of a re-characterization of initial feature space. Such re-characterization is done through the distances between data and centroids obtained from k-means algorithm. This methodology also includes pre-clustering and linear projection stages. In addition, this paper explores the use of morphological and spectral features as descriptive patterns of ERP signal in order to discriminate between normal subjects and ADHD patients. Experimental results show that the morphological features, in contrast with the remaining features considered in this study, are those that more contribute to classification performance, reaching 86% for the original feature set.}, bibtype = {inproceedings}, author = {Castro-cabrera, P. A. and Peluffo-Ordóñez, D. H. and Restrepo, F. and Castellanos-dominguez, G.}, booktitle = {SIPAIM 2011} }
@inproceedings{ title = {A Comparative Study of Weighting Factors for WPCA Based on a Generalized Distance Measure}, type = {inproceedings}, year = {2011}, websites = {https://www.researchgate.net/publication/303722669_A_Comparative_Study_of_Weighting_Factors_for_WPCA_Based_on_a_Generalized_Distance_Measure}, id = {c5283b0b-4c87-3853-8287-22cbbfc90791}, created = {2021-11-14T03:44:28.083Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-11-14T03:47:28.562Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, private_publication = {false}, bibtype = {inproceedings}, author = {Castano, J S and Garcia-Vega, Sergio and Peluffo Ordóñez, Diego Hernán and Castellanos-Domínguez, German}, booktitle = {XV SIMPOSIO DE TRATAMIENTO DE SEÑALES, IMÁGENES Y VISIÓN ARTIFICIAL -STSIVA 2011} }
@inproceedings{ title = {Identification of cardiac arrhythmias by means of Wavelet Packet-based features}, type = {inproceedings}, year = {2011}, keywords = {atrial fibrillation,ecg,energy,wavelet packet}, websites = {https://www.researchgate.net/publication/303722554_Identification_of_cardiac_arrhythmias_by_means_of_Wavelet_Packet-based_features}, id = {48b04828-225b-303e-938c-a1b29bc07d65}, created = {2021-11-14T03:44:28.095Z}, file_attached = {false}, profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3}, last_modified = {2021-11-14T03:48:49.821Z}, read = {false}, starred = {false}, authored = {true}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {Cardiac arrhythmias are important indicators of heart diseases, they refer to electrical conduction problems and therefore their diagnosis is of high clinical interest. However, timely detection is difficult due to factors such as computational cost, large amount of heartbeats per record, morphology variability, infrequency and irregularity of pathological heartbeats. In this work, wavelet transform computed through wavelet packets is applied over electrocardiographic (ECG) signals as a method to characterize and identify normal ECG signals and some arrhythmias such as atrial fibrilation (AF) and life threaded arrhythmias, drawn from MIT databases.}, bibtype = {inproceedings}, author = {Martínez-Tabares, F. J. and Peluffo-Ordóñez, D. H. and Castro-Hoyos, C. and Castellanos-Domínguez, G.}, booktitle = {SIPAIM 2011} }
@inproceedings{Castro2011,
  title = {On the groups number estimation for unsupervised clustering},
  type = {inproceedings},
  year = {2011},
  issue = {4},
  websites = {https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.717.9431&rep=rep1&type=pdf},
  id = {21ac195b-c45b-31b3-b3e9-f14c450735cb},
  created = {2021-11-14T03:44:28.106Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-11-14T03:47:28.523Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  private_publication = {false},
  abstract = {Clustering techniques usually require manually set parameters so the classification task may be correctly carried out, one of the most common being the number of groups or clusters in which data should be separated, yet this relies on a prior knowledge of the data nature. In this work a comparison among different approaches for finding the number of groups is shown, such as singular value decomposition (SVD), analysis of the multiplicity of the greatest eigenvalues from the affinity matrix, and the percentage of the cumulative sum of the singular values of the affinity matrix. The spectral nature of the estimation process as well as the different datasets used, infers that the results rely only on the internal information of data matrices. Results exhibit both limitations and advantages for each method, whether directly related with the nature of the data, or limited by the process structure and definition. Nonetheless these guidelines will be helpful for deciding which estimation technique best applies for clustering data regardless its origin.},
  bibtype = {inproceedings},
  author = {Castro, C and Castro, A E and Peluffo, D H and Castellanos, G},
  booktitle = {XV SIMPOSIO DE TRATAMIENTO DE SEÑALES, IMÁGENES Y VISIÓN ARTIFICIAL -STSIVA 2011}
}
@inproceedings{Ortiz-Jaramillo2010,
  title = {Region of interest extraction using redundant wavelet transform and unsupervised techniques on thermal imaging},
  type = {inproceedings},
  year = {2010},
  websites = {http://qirt.gel.ulaval.ca/archives/qirt2010/papers/QIRT%202010-103.pdf},
  publisher = {QIRT Council},
  id = {f07c793c-b118-3f79-8a92-e622bf20d4d0},
  created = {2020-12-29T22:52:03.246Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:35.525Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Ortiz-Jaramillo2010},
  private_publication = {false},
  bibtype = {inproceedings},
  author = {Ortiz-Jaramillo, B. and Garcia-Álvarez, J. and Rodríguez-Sotelo, J. and Peluffo-Ordóñez, D. and Castellanos-Domínguez, G.},
  doi = {10.21611/qirt.2010.103},
  booktitle = {Proceedings of the 2010 International Conference on Quantitative InfraRed Thermography}
}
@inproceedings{Rodriguez-Sotelo2010,
  title = {Weighted-PCA for unsupervised classification of cardiac arrhythmias},
  type = {inproceedings},
  year = {2010},
  pages = {1906--1909},
  websites = {http://ieeexplore.ieee.org/document/5627321/},
  month = {8},
  publisher = {IEEE},
  id = {3506d8af-c38c-3aed-8316-38a721957a59},
  created = {2020-12-29T22:52:14.354Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:34.356Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Rodriguez-Sotelo2010},
  private_publication = {false},
  abstract = {A method that improves the feature selection stage for non-supervised analysis of Holter ECG signals is presented. The method corresponds to WPCA approach developed mainly in two stages. First, the weighting of the feature set through a weight vector based on M-inner product as distance measure and a quadratic optimization function. The second one is the linear projection of weighted data using principal components. In the clustering stage, some procedures are considered: estimation of the number of groups, initialization of centroids and grouping by means of a soft clustering algorithm. In order to decrease the procedure computational cost, segment analysis, grouping contiguous segments and establishing union and exclusion criteria per each cluster, is carried out. This work is focused to classify cardiac arrhythmias into 5 groups, according to the standard of the AAMI (ANSI/AAMI EC57:1998/2003). To validate the method, some recordings from MIT/BIH arrhythmia database are used. By employing the labels of each recording, the performance is assessed with supervised measures (Se = 90.1%, Sp = 98.9% and Cp = 97.4%), enhancing other works in the literature that do not take into account all heartbeat types. © 2010 IEEE.},
  bibtype = {inproceedings},
  author = {Rodríguez-Sotelo, J L and Delgado-Trejos, E. and Peluffo-Ordóñez, D and Cuesta-Frau, D. and Castellanos-Domínguez, G},
  doi = {10.1109/IEMBS.2010.5627321},
  booktitle = {2010 Annual International Conference of the IEEE Engineering in Medicine and Biology}
}
@inproceedings{Salazar2010,
  title = {Analysis of the effects of Linear and Non-linear distortions on QPSK modulated signals for optical channels},
  type = {inproceedings},
  year = {2010},
  keywords = {linear},
  pages = {210},
  websites = {https://www.researchgate.net/publication/303722369_Analysis_of_the_effects_of_linear_and_non-linear_distortions_on_QPSK_modulated_signals_for_optical_channels},
  id = {b4ca7014-7508-34ac-844f-ee6b919d4c9d},
  created = {2021-01-31T16:47:15.246Z},
  file_attached = {true},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:36.660Z},
  read = {true},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Salazar2010},
  private_publication = {false},
  abstract = {Resumen-En este artículo se presentan algunos de los alcances logrados de la tesis de pregrado titulada "Analysis of the effects of linear and non-linear distortions on QPSK modulated signals for optical channels" desarrollada por los dos primeros autores. La pertinencia de este trabajo radica en que, actualmente, el procesamiento digital de señales ha tomado un importante lugar en las comunicaciones, en particular, en comunicaciones ópticas. Los sistemas actuales de transmisión son soportados por dispositivos con DSP para tareas de filtración y ecualización. En el caso de la filtración, el primer paso es interpretar la distorsión y, en el mejor de los casos, obtener un modelo. En este trabajo se presenta un breve estado del arte de las distorsiones lineales y no lineales en canales de fibra óptica desarrolladas en los últimos trece años. También se presenta un marco teórico concreto y útil acerca de distorsiones y formatos de modulación digital orientado a las comunicaciones ópticas. En tanto a las distorsiones se da una definición y se estudia el efecto sobre la constelación de representación. Para los formatos de modulación digital se presenta un diagrama de bloques y ecuaciones generalizadas. También, se introduce un modelo de distorsión lineal para el formato QPSK. Palabras clave-Distorsión lineal, fibra óptica, modulación digital. Abstract-In this paper, we present some achievements of the degree thesis entitled "Analysis of the effects of linear and non-linear distortions on QPSK modulated signals for optical channels" developed by the two first authors. The pertinence of this work lies in that digital signal processing has currently taken an important place in Communications, in particular, Optical Communications. Current transmission systems are supported by DSP based devices to carry out filtering and equalization tasks. For filtering, the first stage is to understand the distortion and establish a model (when possible). In this work, we present a brief state of the art of linear and non-linear distortions in optical channels developed in the last thirteen years. Also, we present a useful and specific theoretical background about distortions and digital modulation formats oriented to optical communications. Distortions are defined and the effect over representation constellation is studied. For each digital modulation format, we present a block diagram and generalized equations. Additionally, a linear distortion model for QPSK format is introduced.},
  bibtype = {inproceedings},
  author = {Salazar, Johana and Vergara, Mateo and Peluffo, Diego and Diaz, Oscar}
}
@mastersthesis{PeluffoOrdonez2009,
  title = {Estudio comparativo de métodos de agrupamiento no supervisado de latidos de señales ECG},
  type = {mastersthesis},
  year = {2009},
  source = {Tesis Maestria},
  websites = {https://repositorio.unal.edu.co/handle/unal/69982},
  id = {96d5408f-40b0-3b3a-b5c0-3e15a1592663},
  created = {2020-12-29T22:52:03.354Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:35.743Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {PeluffoOrdonez2009},
  private_publication = {false},
  abstract = {En este trabajo se presenta un estudio comparativo de diferentes técnicas de agrupamiento no supervisado con enfoque particional y espectral, orientado a la agrupación de patrones representativos de latidos extraídos de registros electrocardiográficos ambulatorios. Debido a la naturaleza de las señales estudiadas y a que, en muchos casos, no es factible el etiquetado de las mismas, se prefieren las técnicas de agrupamiento no supervisado para su análisis. El uso de un modelo genérico de agrupamiento particional y la estimación de parámetros de inicialización adecuados empleando técnicas espectrales, son algunos de los aportes más significativos de esta investigación. Los experimentos se realizan sobre una base de datos de arritmias estándar del MIT (Massachusetts Institute of Technology) y la extracción de características se hace con técnicas recomendadas por la literatura. Otro aporte importante, es el desarrollo de un método de análisis por segmentos que reduce el costo computacional y mejora el desempeño del agrupamiento en comparación con el análisis tradicional, es decir, analizando todo el conjunto de datos en una sola iteración del procedimiento. Adicionalmente, se sugiere un esquema completo de análisis no supervisado de señales ECG, incluyendo etapas de caracterización, selección de características, estimación del número de grupos, inicialización y agrupamiento.},
  bibtype = {mastersthesis},
  author = {Peluffo Ordóñez, Diego Hernán}
}
@inproceedings{Rodriguez-Sotelo2009,
  title = {Unsupervised feature selection in cardiac arrhythmias analysis},
  type = {inproceedings},
  year = {2009},
  pages = {2571--2574},
  websites = {http://ieeexplore.ieee.org/document/5335284/},
  month = {9},
  publisher = {IEEE},
  id = {6f492d75-d925-3f6b-8b0f-586e13870dfc},
  created = {2020-12-29T22:52:14.023Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:34.331Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Rodriguez-Sotelo2009},
  private_publication = {false},
  abstract = {The problem of detecting clinical events related to cardiac arrhythmias in long term electrocardiograms is a difficult one due to the large amount of irrelevant information that hides such events. This problem has been addressed in the literature by means of clustering or classification algorithms that create data partitions according to a cost function based on heartbeat features dissimilarity measures. However, studies about the type or number of heartbeat features is lacking. Usually, the feature sets used are relevant but redundant, which degrades algorithm performance. This paper describes a method for automatic selection of heartbeat features. This method is assessed using real signals from the MIT database and common features used in previous works. ©2009 IEEE.},
  bibtype = {inproceedings},
  author = {Rodriguez-Sotelo, J. L. and Cuesta-Frau, D. and Peluffo-Ordonez, D. and Castellanos-Dominguez, G.},
  doi = {10.1109/IEMBS.2009.5335284},
  booktitle = {2009 Annual International Conference of the IEEE Engineering in Medicine and Biology Society}
}
@inproceedings{Rodriguez-Sotelo2009a,
  title = {Nonparametric density-based clustering for cardiac arrhythmia analysis},
  type = {inproceedings},
  year = {2009},
  websites = {https://ieeexplore.ieee.org/document/5445342/versions},
  id = {d0f17fd1-c7ee-3f13-9753-e0c7d243decc},
  created = {2020-12-29T22:52:14.622Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:34.455Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Rodriguez-Sotelo2009a},
  private_publication = {false},
  abstract = {In this work, a nonsupervised algorithm for feature selection and a non-parametric density-based clustering algorithm are presented, whose density estimation is performed by Parzen's window approach; this algorithm solves the problem that individual components of the mixture should be Gaussian. The method is applied to a set of recordings from MIT/BIH's arrhythmia database with five groups of arrhythmias recommended by the AAMI. The heartbeats are characterized using prematurity indices, morphological and representation features, which are selected with the Q-α algorithm. The results are assessed by means of supervised (Se, Sp, Sel) and nonsupervised indices for each arrhythmia. The proposed system presents comparable results to other unsupervised methods of literature.},
  bibtype = {inproceedings},
  author = {Rodríguez-Sotelo, José Luis and Peluffo-Ordóñez, D. and Cuesta-Frau, D. and Castellanos-Domínguez, G.},
  booktitle = {Computers in Cardiology}
}
@article{Peluffo-Ordonez2009,
  title = {Estudio Comparativo de Métodos de Selección de Características de Inferencia Supervisada y No Supervisada},
  type = {article},
  year = {2009},
  pages = {149},
  websites = {https://revistas.itm.edu.co/index.php/tecnologicas/article/view/239},
  month = {12},
  day = {20},
  id = {820a0189-4b0b-32d7-811f-5bd4cbf99064},
  created = {2020-12-29T22:52:16.716Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-02-20T22:05:32.712Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Peluffo-Ordonez2009},
  private_publication = {false},
  abstract = {En este trabajo se presenta un estudio comparativo de algunos métodos de selección de características de inferencia supervisada y no supervisada derivados del algoritmo PCA clásico. Se deduce una función objetivo de PCA a partir del error cuadrático medio de los datos y su proyección sobre una base ortonormal, y se extiende este concepto para derivar una expresión asociada al algoritmo fundamental de WPCA. Adicionalmente, se estudian los algoritmos Q - α supervisado y no supervisado y se explica su relación con PCA. Se presentan resultados empleando dos conjuntos de datos: Uno de baja dimensión para estudiar los efectos de la rotación ortogonal y la dirección de los componentes principales y otro de alta dimensión para evaluar los resultados de clasificación. Los métodos de selección de características fueron evaluados teniendo en cuenta la cantidad de características relevantes obtenidas, costo computacional y resultados de clasificación. La clasificación se realizó con un algoritmo particional de agrupamiento no supervisado.},
  bibtype = {article},
  author = {Peluffo-Ordóñez, Diego H. and Rodríguez-Sotelo, José L. and Castellanos-Domínguez, Germán},
  doi = {10.22430/22565337.239},
  journal = {TecnoLógicas},
  number = {23}
}
@inproceedings{PeluffoOrdonez2008,
  title = {Detección de arritmias de tipo bloqueo de rama mediante análisis no supervisado y morfología del QRS},
  type = {inproceedings},
  year = {2008},
  keywords = {arritmias,bloqueo de rama,clustering,ecg},
  pages = {1--6},
  websites = {https://www.researchgate.net/publication/282611786_DETECCION_DE_ARRITMIAS_DE_TIPO_BLOQUEO_DE_RAMA_MEDIANTE_ANALISIS_NO_SUPERVISADO_Y_MORFOLOGIA_DEL_QRS},
  id = {e5c9c769-1dfa-3766-87ef-1337cb59e3c8},
  created = {2021-11-14T04:09:08.799Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-11-14T04:10:20.236Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  private_publication = {false},
  abstract = {Se presenta un método para clasificar arritmias ventriculares de tipo bloqueo de rama izquierda (L) y derecha (R) con respecto a latidos normales (N) de la base de datos de arritmias de la MIT-BIH utilizando clasificación no supervisada debido principalmente a la variabilidad morfológica entre registros. Se desarrolla una etapa de extracción de características basada en la morfología del latido y una etapa de clustering que utiliza el algoritmo de búsqueda heurística k-means modificado en el sentido de su inicialización utilizando el criterio max-min. El sistema presenta resultados comparables con los reportados en la literatura.},
  bibtype = {inproceedings},
  author = {Peluffo-Ordóñez, Diego Hernán and Rodríguez-Sotelo, José Luis and Castellanos-Domínguez, Germán},
  booktitle = {SIB 2008}
}
@inproceedings{Peluffo2008,
  title = {Metodología para la reconstrucción y extracción de características del complejo QRS basada en el modelo paramétrico de Hermite},
  type = {inproceedings},
  year = {2008},
  websites = {https://www.diegopeluffo.com/publicaciones/STIEE2008.pdf},
  id = {d546ee4f-741c-392b-9d41-a39eefb8bd46},
  created = {2021-11-14T04:09:08.804Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-11-14T04:10:20.226Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  private_publication = {false},
  abstract = {En este trabajo se desarrolla una metodología para la reconstrucción y caracterización de los complejos QRS empleando el modelo paramétrico de Hermite. Los complejos son extraídos de la base de datos MIT-BIH. La reconstrucción se realiza empleando el valor óptimo del parámetro de escala de las bases de Hermite obtenido mediante la minimización de la disimilitud de la señal original y la reconstruida. Se emplea DTW como medida de disimilitud. Adicionalmente, se presenta un método para obtener la cantidad mínima de bases que generan una reconstrucción con alta confiabilidad basado en la comparación de los espectros de frecuencia en el rango de 1 − 20 Hz. La evaluación de la caracterización se realiza mediante el algoritmo de clustering K-means Max-Min. I. INTRODUCCIÓN. El electrocardiograma (ECG) es la prueba diagnóstica más importante y definitiva para el análisis del comportamiento eléctrico del corazón [1]. Además, es de uso muy frecuente por tratarse de una técnica no invasiva. El test electrocardiográfico más común es el ECG de superficie de 12 derivaciones que se realiza por un breve periodo de tiempo. Sin embargo algunas patologías cardíacas transitorias y de alta variabilidad espontánea en el ritmo y frecuencia no pueden detectarse en este tipo de test. Por esta razón existe la electrocardiografía ambulatoria que se realiza para evaluar al paciente durante prolongados periodos de tiempo, sin alterar su actividad diaria, lo que permite el examen dinámico del ECG en su ambiente natural. Los registros obtenidos del test ambulatorio se les conoce como registros Holter (Holter, 1961). Debido a la extensión de los registros Holter, la inspección visual resulta una tarea compleja para el especialista. Por esta razón se han desarrollado sistemas de detección automatizada, aunque aún existen problemas abiertos relacionados, principalmente, con la cantidad de latidos y costo computacional.},
  bibtype = {inproceedings},
  author = {Peluffo, D and Rodríguez, J L and Castellanos, C G},
  booktitle = {Semana Técnica de Ingeniería Eléctrica y Electrónica}
}