<script src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F2168152%2Fitems%3Fkey%3DVCdsaROd5deDY3prqqG8kI0c%26format%3Dbibtex%26limit%3D100&jsonp=1"></script>
<?php
// Server-side embed of a BibBase-rendered publication list
// (Zotero group 2168152, BibTeX format, first 100 items).
// NOTE(review): the Zotero API key is exposed in this URL and in the
// client-side embeds above/below — confirm it is a read-only key for a
// public library before deploying.
$url = "https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F2168152%2Fitems%3Fkey%3DVCdsaROd5deDY3prqqG8kI0c%26format%3Dbibtex%26limit%3D100";

// file_get_contents() returns false on failure (DNS error, timeout, non-2xx
// without a context); the original passed that straight to print_r, which
// rendered nothing while PHP emitted a warning. Check explicitly instead.
$contents = file_get_contents($url);
if ($contents === false) {
    // Degrade gracefully: leave a marker in the page source instead of
    // half-broken output.
    echo "<!-- BibBase publication list unavailable -->";
} else {
    // The response is HTML produced by BibBase and is meant to be embedded
    // verbatim, so it is echoed unescaped (print_r on a string === echo).
    echo $contents;
}
?>
<iframe src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F2168152%2Fitems%3Fkey%3DVCdsaROd5deDY3prqqG8kI0c%26format%3Dbibtex%26limit%3D100"></iframe>
For more details, see the documentation.
To the site owner:
Action required! Mendeley is changing its API. In order to keep using Mendeley with BibBase past April 14th, you need to:
@inproceedings{Dawod_2023, title = {{NFT} {Appraisal} {Using} {Machine} {Learning}}, isbn = {978-1-4503-9950-0}, url = {http://dx.doi.org/10.1145/3588155.3588181}, doi = {10.1145/3588155.3588181}, abstract = {Non-Fungible Tokens (NFTs) are digital assets based on a blockchain and those are characterized as unique cryptographic tokens and non-interchangeable. To date, research into the NFT marketplace has been relatively limited. As it is an emerging platform with many unique elements, The NFT market has been impacted due to recent fluctuations in crypto-asset markets more broadly. This current bear market cycle has shed light on concerns around the value of NFTs, profit-based motivation, and environmental sustainability. However, periods of volatility and cyclicality are to be expected with any nascent technology as it develops a product-market fit. consequently, the appraisal of real-price for NFT collections is essential for individual financial security and investment making. In this study, we evaluate the machine learning algorithms to appraise their real-price based on NFT item's characteristics, market event information, and their rarity score data acquired by retrieved from the biggest marketplace OpenSea. Furthermore, the procedures were applied to meet the objectives of this study we built prediction models based on various machine-learning algorithms ranging from Random Forest, XGBoost, SVM, Lasso, ElasticNet, Ridge, Linear Polynomial Regression, TabNet, CatBoost, and LightGBM models. From the results, LightGBM regression model outperformed the other by RMSE around 0.905. 
The best R2 is only found in this model, which has a value of 0.917.}, booktitle = {{ACM} {International} {Conference} {Proceeding} {Series}}, publisher = {ACM}, author = {Dawod, Ahmed Dawod Mohammed and Munkhdalai, Lkhagvadorj and Park, Kwang Ho and Ryu, Keun Ho and Pham, Van Huy}, month = feb, year = {2023}, note = {Series Title: APIT 2023}, keywords = {Blockchain, CatBoost, ElasticNet, Lasso, LightGBM, Linear and Polynomial Regression, Non-Fungible Tokens, Random Forest, Ridge, SVM, TabNet, XGBoost}, pages = {160--166}, }
@article{Munkhdalai_2023, title = {Discrimination {Neural} {Network} {Model} for {Binary} {Classification} {Tasks} on {Tabular} {Data}}, volume = {11}, issn = {21693536}, url = {http://dx.doi.org/10.1109/ACCESS.2023.3243919}, doi = {10.1109/ACCESS.2023.3243919}, abstract = {For the classification task, neural network-based approaches attempt to distinguish between two distributions by determining the joint distribution of input variables for each class. However, the most challenging task is still to classify the observations in the overlapping region of two classes. In this work, we propose a new discrimination neural network (DiscNN) architecture to address this issue. Our DiscNN learns to embed the initial input into more informative representations with better discriminability between the two distributions based on the cosine embedding loss. We also train our proposed model using the few-shot learning method to extract better-generalized representations from the initial input. We applied the DiscNN model to 35 tabular datasets from the OpenML-CC18 benchmark for a binary classification task. Our model showed superior performances on 28 datasets of them. In addition, we also performed experiments on 95 imbalanced datasets from the KEEL repository. The experiment results showed that the DiscNN outperformed the state-of-the-art models, including CatBoost, LightGBM, TabNet, VIME and Scarf, by around 0.23\% AUC, 0.20\% G-mean, and 1.06\% F1 score.}, journal = {IEEE Access}, author = {Munkhdalai, Lkhagvadorj and Munkhdalai, Tsendsuren and Hong, Jang Eui and Pham, Van Huy and Theera-Umpon, Nipon and Ryu, Keun Ho}, year = {2023}, note = {Publisher: Institute of Electrical and Electronics Engineers (IEEE)}, keywords = {Neural network, classification task, cosine similarity, imbalanced problem, tabular data}, pages = {15404--15418}, }
@article{Munkhdalai_2022, title = {Recurrent {Neural} {Network}-{Augmented} {Locally} {Adaptive} {Interpretable} {Regression} for {Multivariate} {Time}-{Series} {Forecasting}}, volume = {10}, issn = {21693536}, url = {https://doi.org/10.1109%2Faccess.2022.3145951}, doi = {10.1109/ACCESS.2022.3145951}, abstract = {Explaining dynamic relationships between input and output variables is one of the most important issues in time dependent domains such as economic, finance and so on. In this work, we propose a novel locally adaptive interpretable deep learning architecture that is augmented by recurrent neural networks to provide model explainability and high predictive accuracy for time-series data. The proposed model relies on two key aspects. First, the base model should be a simple interpretable model. In this step, we obtain our base model using a simple linear regression and statistical test. Second, we use recurrent neural networks to re-parameterize our base model to make the regression coefficients adaptable for each time step. Our experimental results on public benchmark datasets showed that our model not only achieves better predictive performance than the state-of-the-art baselines, but also discovers the dynamic relationship between input and output variables.}, journal = {IEEE Access}, author = {Munkhdalai, Lkhagvadorj and Munkhdalai, Tsendsuren and Pham, Van Huy and Li, Meijing and Ryu, Keun Ho and Theera-Umpon, Nipon}, year = {2022}, note = {Publisher: Institute of Electrical and Electronics Engineers (\{IEEE\})}, keywords = {Explainable AI, linear regression, recurrent neural network, time-series forecasting}, pages = {11871--11885}, }
@article{2021, title = {Algorithms and devices for smart processing technology for energy saving}, volume = {2021}, issn = {15635147}, url = {https://doi.org/10.1155%2F2021%2F9853615}, doi = {10.1155/2021/9853615}, journal = {Mathematical Problems in Engineering}, author = {Lee, Sanghyuk and Nayel, Mohamed and Pham, Van Huy and Rhee, Sang Bong}, month = aug, year = {2021}, note = {Publisher: Hindawi Limited}, pages = {1--2}, }
@article{Pham2021, title = {Search-{Based} {Planning} and {Reinforcement} {Learning} for {Autonomous} {Systems} and {Robotics}}, volume = {984}, issn = {18609503}, url = {https://link.springer.com/10.1007/978-3-030-77939-9_14}, doi = {10.1007/978-3-030-77939-9_14}, abstract = {In this chapter, we address the competent Autonomous Vehicles should have the ability to analyze the structure and unstructured environments and then to localize itself relative to surrounding things, where GPS, RFID or other similar means cannot give enough information about the location. Reliable SLAM is the most basic prerequisite for any further artificial intelligent tasks of autonomous mobile robots. The goal of this paper is to simulate a SLAM process on advanced software development. The model represents the system itself, whereas the simulation represents the operation of the system over time. And the software architecture will help us to focus our work to realize our wish with least trivial work. It is an open-source meta-operating system, which provides us tremendous tools for robotics related problems. Specifically, we address the advanced vehicles should have the ability to analyze the structured and unstructured environment based on solving the search-based planning and then we move to discuss interested in reinforcement learning-based model to optimal trajectory in order to apply to autonomous systems.}, journal = {Studies in Computational Intelligence}, author = {Le, Than and Hung, Bui Thanh and Van Huy, Pham}, year = {2021}, keywords = {Extended kalman filter, Kalman filter, Modelling, Monte Corlo, Probabilistics robotics, Q-Learning, Reinforcement learning, SLAM, Search-based planning, Simulation}, pages = {481--501}, }
@article{Pham2021, title = {Reliability analysis in grid system considering load}, volume = {24}, issn = {15737543}, doi = {10.1007/s10586-021-03241-3}, abstract = {This paper discusses the load-balanced task allocation problem in the grid transaction processing system to maximize reliability. Finding the solution of optimal load-balanced task allocation is known to be an NP-hard. This paper presents a load-balanced task allocation based cuckoo search-ant colony optimization (LBTA\_CSACO) method for this problem. The LBTA\_CSACO is based on the cooperative behavior of cuckoo search and ant colony optimization to find a collection of task allocation solutions. This paper also models the reliability of the system with the help of the availability of the system. The motive of this work is to maximize the reliability of the system. After the simulation, six existing algorithms are taken for the comparison of the results; Honey Bee Optimization (HBO), Ant Colony Optimization (ACO), Hierarchical Load Balanced Algorithm (HLBA), Dynamic and Decentralized Load Balancing (DLB), and Randomized respectively.}, number = {4}, journal = {Cluster Computing}, author = {Mahato, Dharmendra Prasad and Pham, Van Huy}, year = {2021}, keywords = {Ant colony optimization, Cuckoo search, Grid computing, Reliability analysis, Transaction processing}, pages = {2883--2896}, }
@article{Amarbayasgalan_2021, title = {An {Efficient} {Prediction} {Method} for {Coronary} {Heart} {Disease} {Risk} {Based} on {Two} {Deep} {Neural} {Networks} {Trained} on {Well}-{Ordered} {Training} {Datasets}}, volume = {9}, issn = {21693536}, url = {https://doi.org/10.1109%2Faccess.2021.3116974}, doi = {10.1109/ACCESS.2021.3116974}, abstract = {This study proposes an efficient prediction method for coronary heart disease risk based on two deep neural networks trained on well-ordered training datasets. Most real datasets include an irregular subset with higher variance than most data, and predictive models do not learn well from these datasets. While most existing prediction models learned from the whole or randomly sampled training datasets, our suggested method draws up training datasets by separating regular and highly biased subsets to build accurate prediction models. We use a two-step approach to prepare the training dataset: (1) divide the initial training dataset into two groups, commonly distributed and highly biased using Principal Component Analysis, (2) enrich the highly biased group by Variational Autoencoders. Then, two deep neural network classifiers learn from the isolated training groups separately. The well-organized training groups enable a chance to build more accurate prediction models. When predicting the risk of coronary heart disease from the given input, only one appropriate model is selected based on the reconstruction error on the Principal Component Analysis model. Dataset used in this study was collected from the Korean National Health and Nutritional Examination Survey. We have conducted two types of experiments on the dataset. The first one proved how Principal Component Analysis and Variational Autoencoder models of the proposed method improves the performance of a single deep neural network. 
The second experiment compared the proposed method with existing machine learning algorithms, including Naïve Bayes, Random Forest, K-Nearest Neighbor, Decision Tree, Support Vector Machine, and Adaptive Boosting. The experimental results show that the proposed method outperformed conventional machine learning algorithms by giving the accuracy of 0.892, specificity of 0.840, precision of 0.911, recall of 0.920, f-measure of 0.915, and AUC of 0.882.}, journal = {IEEE Access}, author = {Amarbayasgalan, Tsatsral and Pham, Van Huy and Theera-Umpon, Nipon and Piao, Yongjun and Ryu, Keun Ho}, year = {2021}, note = {Publisher: Institute of Electrical and Electronics Engineers (\{IEEE\})}, keywords = {Coronary heart disease, deep neural network, machine learning, principal component analysis, reconstruction error, variational autoencoder}, pages = {135210--135223}, }
@article{doi:10.1080/10556788.2020.1713778, title = {Inverse stable point problem on trees under an extension of {Chebyshev} norm and {Bottleneck} {Hamming} distance}, volume = {36}, issn = {10294937}, url = {https://doi.org/10.1080/10556788.2020.1713778}, doi = {10.1080/10556788.2020.1713778}, abstract = {In the inverse optimization problem, we modify parameters of the original problem at minimum total cost so as to make a prespecified solution optimal with respect to new parameters. We extend in this paper a class of inverse single facility problems on trees, including inverse balance point, inverse 1-median and inverse 1-center problem, and call it the inverse stable point problem. For the general situation where variables are both edge lengths and vertex weights under an extension of Chebyshev norm and bottleneck Hamming distance, we first derive an algorithm that reduces the corresponding problem to the one under either Chebyshev norm or bottleneck Hamming distance and then develop an approximation approach for the problem. Special cases concerning the problem under this extension with strongly polynomial time algorithms are also discussed.}, number = {4}, journal = {Optimization Methods and Software}, author = {Pham, Van Huy and Nguyen, Kien Trung and Le, Tran Thu}, year = {2021}, note = {Publisher: Taylor \& Francis}, keywords = {Chebyshev norm, Hamming distance, Location problem, inverse optimization, tree}, pages = {755--772}, }
@article{Pham2021, title = {Shared {Representation} with {Multi}-omics {Distributed} {Latent} {Spaces} for {Cancer} {Subtype} {Classification}}, volume = {212}, issn = {21903026}, doi = {10.1007/978-981-33-6757-9_52}, abstract = {The integration of multi-omics data is suitable for early detection and is also significant to a wide variety of cancer detection and treatment fields. Accurate prediction of survival in cancer patients remains a challenge due to the ever-increasing heterogeneity and complexity of cancer. The latest developments in high-throughput sequencing technologies have rapidly produced multi-omics data of the same cancer sample. Recently, many studies have shown to extract biologically relevant latent features to learn the complexity of cancer by taking advantage of deep learning. In this paper, we propose a Shared representation learning method by employing the Autoencoder structure for Multi-Omics (SAMO) data, which is inspired by the recent success of variational autoencoders to extract biologically relevant features. Variational autoencoders are a deep neural network approach capable of generating meaningful latent spaces. We address the problem of losing information when integrating multiple data sources. We formulate a distributed latent space jointly learned by separated variational autoencoders on each data source in an unsupervised manner. Firstly, we pre-trained the variational autoencoders separately, which produce shared latent representations. Secondly, we fine-tuned only the encoders and latent representations with a supervised classifier for the prediction task. Here, we used a lung cancer multi-omics data combined illumina human methylation 27 K and gene expression RNA seq. 
datasets from The Cancer Genome Atlas (TCGA) data portal.}, journal = {Smart Innovation, Systems and Technologies}, author = {Ryu, Keun Ho and Park, Kwang Ho and Namsrai, Oyun Erdene and Pham, Van Huy and Batbaatar, Erdenebileg}, year = {2021}, note = {ISBN: 9789813367562}, keywords = {Cancer subtype classification, Computational biology, Deep learning, Multi-omics data, Supervised learning, Variational autoencoder}, pages = {418--425}, }
@article{Pham2021, title = {Bayesian {Meta} {Regression}}, volume = {212}, issn = {21903026}, doi = {10.1007/978-981-33-6757-9_7}, abstract = {This work extends Bayesian regression as an adaptive that augmented by deep neural networks (the probabilistic encoder) to obtain the posterior probability distributions of the regression coefficients. We use variational inference to obtain the conditional distribution over the regression coefficients, which are the latent space given the observed data. Therefore, our model can recognize local conditional probability distribution of the regression coefficients for each observation as well as it can measure the uncertainty of the model locally. Experimental results on the benchmark datasets prove that our Bayesian Meta Regression (BMR) significantly outperforms baseline regression techniques and precisely measures the model uncertainty for each observation.}, journal = {Smart Innovation, Systems and Technologies}, author = {Munkhdalai, Lkhagvadorj and Pham, Van Huy and Ryu, Keun Ho}, year = {2021}, note = {ISBN: 9789813367562}, keywords = {Bayesian regression, Variational autoencoder, Variational inference}, pages = {52--59}, }
@inproceedings{Pham2021, title = {Scalable {Leader} {Election} {Considering} {Load} {Balancing}}, volume = {227}, isbn = {978-3-030-75077-0}, doi = {10.1007/978-3-030-75078-7_27}, abstract = {Distributed computing consists of a model where multiple parts of a system are accessed by different computing machines for the betterment of work efficiency. To get better results, electing a leader is one of the most critical tasks. Leader election is the process of assigning one of the processes as the leader whose work is to organize all the provided jobs that are distributed between the different computer nodes and to provide them with the required resources. The obstacle that is dealt with here is to elect a processor that can act like a leader from among the set of multiple processors using distributed protocols. To explain this in further detail, let us consider that we have n number of processors, and among these n processors, we have n number of processors, and among these n processors we have cn number of processors which can be considered as bad or corrupt and (1-c)n number of processors among them can be considered as good or not corrupt. Here the value of c is a fraction value and is fixed. The problem that is to be dealt with is to select a processor with a probability that has to be constant, a single processor from the given n number of processors which can act as their leader, no matter which set of the given cn processors are bad here. The scalability that is mentioned hereof leader election being scalable refers to the fact that every good or non-corrupt processor that is available amongst the total n number of processors sends and also processes several bits. And these number of bits that are being sent and processed by the various processors are polylogarithmic in n. 
Here we can say that the number of bits that are sent over or processed by a node belongs to a function that is polynomial in the logarithm of n.}, booktitle = {Lecture {Notes} in {Networks} and {Systems}}, author = {Rani, Radha and Rashpa, Saurabh and Mahato, Dharmendra Prasad and Pham, Van Huy}, year = {2021}, note = {ISSN: 23673389}, pages = {260--269}, }
@inproceedings{Pham2021, title = {Antlion {Optimizer} {Based} {Load}-{Balanced} {Transaction} {Scheduling} for {Maximizing} {Reliability}}, volume = {226 LNNS}, isbn = {978-3-030-75074-9}, doi = {10.1007/978-3-030-75075-6_6}, abstract = {This paper presents a load-balanced transaction scheduling algorithm for maximizing reliability. The algorithm is based on the antlion optimizer method. Maximization of reliability in a grid system is an NP-hard problem. The paper compares the proposed algorithm with existing famous meta-heuristic algorithms. The result analysis shows that the proposed algorithm performs better compared to other algorithms.}, booktitle = {Lecture {Notes} in {Networks} and {Systems}}, author = {Mahato, Dharmendra Prasad and Pham, Van Huy}, year = {2021}, note = {ISSN: 23673389}, pages = {66--79}, }
@article{Pham2021, title = {Adaptive {Softmax} {Regression} for {Credit} {Scoring}}, volume = {212}, issn = {21903026}, doi = {10.1007/978-981-33-6757-9_51}, abstract = {Credit scoring is a classification task from the machine learning perspective. Efficiently classifying bad borrowers is the main aim of building a credit scoring model. This work proposes a novel adaptive softmax regression method for credit scoring. We augment a simple softmax regression by deep neural networks to make its estimated probabilities as an adaptive for each observation. Our experimental result on public benchmark datasets shows that adaptive softmax regression outperformed the machine learning baselines in terms of Brier score, area under the curve (AUC) and accuracy.}, journal = {Smart Innovation, Systems and Technologies}, author = {Munkhdalai, Lkhagvadorj and Davagdorj, Khishigsuren and Pham, Van Huy and Ryu, Keun Ho}, year = {2021}, note = {ISBN: 9789813367562}, keywords = {Adaptive learning, Credit scoring, Decision making, Softmax regression}, pages = {409--417}, }
@inproceedings{Pham2021, title = {A {Subtype} {Classification} of {Hematopoietic} {Cancer} {Using} {Machine} {Learning} {Approach}}, volume = {1371 CCIS}, isbn = {9789811616846}, doi = {10.1007/978-981-16-1685-3_10}, abstract = {Hematopoietic cancer is the malignant transformation in immune system cells. This cancer usually occurs in areas such as bone marrow and lymph nodes, the hematopoietic organ, and is a frightening disease that collapses the immune system with its own mobile characteristics. Hematopoietic cancer is characterized by the cells that are expressed, which are usually difficult to detect in the hematopoiesis process. For this reason, we focused on the five subtypes of hematopoietic cancer and conducted a study on classifying by applying machine learning algorithms both contextual approach and non-contextual approach. First, we applied PCA approach for extracting suited feature for building classification model for subtype classification. And then, we used four machine learning classification algorithms (support vector machine, k-nearest neighbor, random forest, neural network) and synthetic minority oversampling technique for generating a model. As a result, most classifiers performed better when the oversampling technique was applied, and the best result was that oversampling applied random forest produced 95.24\% classification performance.}, booktitle = {Communications in {Computer} and {Information} {Science}}, author = {Park, Kwang Ho and Pham, Van Huy and Davagdorj, Khishigsuren and Munkhdalai, Lkhagvadorj and Ryu, Keun Ho}, year = {2021}, note = {ISSN: 18650937}, keywords = {Gene expression, Hematopoietic cancer, Principal component analysis, Subtype classification, Synthetic minority oversampling technique}, pages = {113--121}, }
@article{Pham2021, title = {Inverse {Group} 1-{Median} {Problem} {On} {Trees}}, volume = {17}, issn = {1553166X}, doi = {10.3934/jimo.2019108}, abstract = {In location theory, group median generalizes the concepts of both median and center. We address in this paper the problem of modifying vertex weights of a tree at minimum total cost so that a prespecified vertex becomes a group 1-median with respect to the new weights. We call this problem the inverse group 1-median on trees. To solve the problem, we first reformulate the optimality criterion for a vertex being a group 1-median of the tree. Based on this result, we prove that the problem is NP-hard. Particularly, the corresponding problem with exactly two groups is however solvable in O(n2 log n) time, where n is the number of vertices in the tree}, number = {1}, journal = {Journal of Industrial and Management Optimization}, author = {Nguyen, Kien Trung and Hieu, Vo Nguyen Minh and Pham, Van Huy}, year = {2021}, keywords = {Group median, complexity, inverse optimization, parameterization, tree}, pages = {221--232}, }
@article{Pham2021, title = {Explainable {Artificial} {Intelligence} {Based} {Framework} for {Non}-{Communicable} {Diseases} {Prediction}}, volume = {9}, issn = {21693536}, doi = {10.1109/ACCESS.2021.3110336}, abstract = {The rapid rise of non-communicable diseases (NCDs) becomes one of the serious health issues and the leading cause of death worldwide. In recent years, artificial intelligence-based systems have been developed to assist clinicians in decision-making to reduce morbidity and mortality. However, a common drawback of these modern studies is related to explanations of their output. In other words, understanding the inner logic behind the predictions is hidden to the end-user. Thus, clinicians struggle to interpret these models because of their black-box nature, and hence they are not acceptable in the medical practice. To address this problem, we have proposed a Deep Shapley Additive Explanations (DeepSHAP) based deep neural network framework equipped with a feature selection technique for NCDs prediction and explanation among the population in the United States. Our proposed framework comprises three components: First, representative features are done based on the elastic net-based embedded feature selection technique; second a deep neural network classifier is tuned with the hyper-parameters and used to train the model with the selected feature subset; third, two kinds of model explanation are provided by the DeepSHAP approach. Herein, (I) explaining the risk factors that affected the model's prediction from the population-based perspective; (II) aiming to explain a single instance from the human-centered perspective. The experimental results indicated that the proposed model outperforms various state-of-the-art models. In addition, the proposed model can improve the medical understanding of NCDs diagnosis by providing general insights into the changes in disease risk at the global and local levels. 
Consequently, DeepSHAP based explainable deep learning framework contributes not only to the medical decision support systems but also can provide to real-world needs in other domains.}, journal = {IEEE Access}, author = {Davagdorj, Khishigsuren and Bae, Jang Whan and Pham, Van Huy and Theera-Umpon, Nipon and Ryu, Keun Ho}, year = {2021}, keywords = {Non-communicable diseases, deep neural network, deep shapley additive explanations, explainable artificial intelligence, feature selection, prediction}, pages = {123672--123688}, }
@article{8998219, title = {Context-{Similarity} {Collaborative} {Filtering} {Recommendation}}, volume = {8}, issn = {21693536}, doi = {10.1109/ACCESS.2020.2973755}, abstract = {This article proposes a new method to overcome the sparse data problem of the collaborative filtering models (CF models) by considering the homologous relationship between users or items calculated on contextual attributes when we build the CF models. In the traditional CF models, the results are built only based on data from the user's ratings for items. The results of the proposed models are calculated on two factors: (1) the similar factors based on rating values; (2) the similar factors based on contextual attributes. The findings from the experimentation on two datasets DePaulMovie and InCarMusic, show that the proposed models have higher accuracy than the traditional CF models.}, journal = {IEEE Access}, author = {Huynh, Hiep Xuan and Phan, Nghia Quoc and Pham, Nghi Mong and Pham, Van Huy and Hoang Son, Le and Abdel-Basset, Mohamed and Ismail, Mahmoud}, year = {2020}, keywords = {CIBCF models, CUBCF models, context-similarity matrix, contextual attributes}, pages = {33342--33351}, }
@article{Pham2020, title = {A {Novel} {Single} {Valued} {Neutrosophic} {Hesitant} {Fuzzy} {Time} {Series} {Model}: {Applications} in {Indonesian} and {Argentinian} {Stock} {Index} {Forecasting}}, volume = {8}, issn = {21693536}, doi = {10.1109/ACCESS.2020.2982825}, abstract = {This paper proposed a novel first-order single-valued neutrosophic hesitant fuzzy time series (SVNHFTS) forecasting model. Our aim is to improve the previously proposed neutrosophic time series (NTS) model by incorporating the degree of the hesitancy using single-valued neutrosophic hesitant fuzzy set (SVNHFS) model instead of single-valued neutrosophic set (SVNS). Our paper's novelty is that we incorporate an algorithm that automatically converts the crisp dataset into the neutrosophic set that eliminates the need for experts' input or opinions in determining the membership in each of the partitioned neutrosophic set. We also incorporate Markov Chain algorithm in the de-neutrosophication process to include the weightage of the repeating neutrosophic logical relationships (NLRs). Our paper's significant contribution is to add to the existing body of knowledge related to fuzzy time series (FTS) by developing a new FTS model based on SVNHFS, one of the improved version of fuzzy sets, since this area of research is still relatively underdeveloped. To determine our proposed model's capability, we apply our proposed SVNHFTS model to three real datasets while also comparing the result to the other FTS models based on improved versions of fuzzy sets. Our datasets include benchmark enrollment data of University of Alabama, IDX Composite (Indonesian composite stock index), and MERVAL index (Argentinian composite stock index). 
The result shows that our proposed SVNHFTS model outperforms most of the other FTS models in terms of AFE and RMSE, especially the previously proposed NTS model.}, journal = {IEEE Access}, author = {Tanuwijaya, Billy and Selvachandran, Ganeshsree and Son, Le Hoang and Abdel-Basset, Mohamed and Huynh, Hiep Xuan and Pham, Van Huy and Ismail, Mahmoud}, year = {2020}, keywords = {Single-valued neutrosophic hesitant fuzzy set (SVN, fuzzy time series (FTS), neutrosophic time series (NTS), single-valued neutrosophic hesitant fuzzy time ser}, pages = {60126--60141}, }
@inproceedings{Van_Huy_Pham_70119965, title = {Deep {Feature} {Extraction} for {Panoramic} {Image} {Stitching}}, volume = {12034 LNAI}, isbn = {978-3-030-42057-4}, url = {http://doi.org/10.1007/978-3-030-42058-1%5C_12}, doi = {10.1007/978-3-030-42058-1_12}, abstract = {Image stitching is an important task in image processing and computer vision. Image stitching is the process of combining multiple photographic images with overlapping fields of view to produce a segmented panorama, resolution image. It is widely used in object reconstruction, panoramic creating. In this paper, we present an approach based on deep learning for image stitching, which are applied to generate high resolution panoramic image supporting for virtual tour interaction. Different from most existing image matching methods, the proposed method extracts image features using deep learning approach. Our approach directly estimates locations of features between pairwise constraint of images by maximizing an image- patch similarity metric between images. A large dataset high resolution images and videos from natural tourism scenes were collected for training and evaluation. Experimental results illustrated that the deep feature approach outperforms.}, booktitle = {Lecture {Notes} in {Computer} {Science} (including subseries {Lecture} {Notes} in {Artificial} {Intelligence} and {Lecture} {Notes} in {Bioinformatics})}, author = {Hoang, Van Dung and Tran, Diem Phuc and Nhu, Nguyen Gia and Pham, The Anh and Pham, Van Huy}, year = {2020}, note = {ISSN: 16113349}, keywords = {Deep learning, Feature extraction, Feature matching, Image stitching}, pages = {141--151}, }
@inproceedings{Pham2020, title = {Deep {Reconstruction} {Error} {Based} {Unsupervised} {Outlier} {Detection} in {Time}-{Series}}, volume = {12034 LNAI}, isbn = {978-3-030-42057-4}, doi = {10.1007/978-3-030-42058-1_26}, abstract = {With all the advanced technology nowadays, the availability of time-series data is being increased. Outlier detection is an identification of abnormal patterns that provide useful information for many kinds of applications such as fraud detection, fault diagnosis, and disease detection. However, it will require an expensive domain and professional knowledge if there is no label which indicates normal and abnormality. Therefore, an unsupervised novelty detection approach will be used. In this paper, we propose a deep learning-based approach. First, it prepares subsequences according to the optimal lag length using Autoregressive (AR) model. The selected lag length for time-series analysis defines the data context in which further analysis is performed. Then, reconstruction errors (RE) of the subsequences on deep convolutional autoencoder (CAE) models are used to estimate the outlier threshold, and density-based clustering is used to identify outliers. We have compared the proposed method with several publicly available state-of-the-art anomaly detection methods on 30 time-series benchmark datasets. These results show that our proposed deep reconstruction error based approach outperforms the compared methods in most of the cases.}, booktitle = {Lecture {Notes} in {Computer} {Science} (including subseries {Lecture} {Notes} in {Artificial} {Intelligence} and {Lecture} {Notes} in {Bioinformatics})}, author = {Amarbayasgalan, Tsatsral and Lee, Heon Gyu and Van Huy, Pham and Ryu, Keun Ho}, year = {2020}, note = {ISSN: 16113349}, keywords = {Anomaly, Autoregressive model, Deep convolutional autoencoder, Outlier, Time-series data}, pages = {312--321}, }
@inproceedings{Van_Huy_Pham_70119969, title = {Currency {Recognition} {Based} on {Deep} {Feature} {Selection} and {Classification}}, volume = {1178 CCIS}, isbn = {9789811533792}, url = {http://doi.org/10.1007/978-981-15-3380-8%5C_24}, doi = {10.1007/978-981-15-3380-8_24}, abstract = {Advanced technology has played an important role in the circulation of the banknote counterfeit and currency value recognition. This study proposes an approach for the currency recognition based on the fundamental image processing and deep learning for the extraction characteristics and recognition of currency values. The large capacity of traditional techniques was proposed for currency recognition based on infrared spectrometer and chemometrics using special devices. This paper presents a recognition method to detect face values from currency paper and. The proposed method can recognize some kinds of currency values and national currencies. The study investigated and proposed the deep neural network, which reaches appropriate accuracy rate and reduces consumption time. In order to improve accuracy of recognition model, data augmentation techniques are also investigated for training data preprocessing. The experimental results show that the proposed approach is applicable to the practical applications.}, booktitle = {Communications in {Computer} and {Information} {Science}}, author = {Trinh, Hung Cuong and Vo, Hoang Thanh and Pham, Van Huy and Nath, Bhagawan and Hoang, Van Dung}, year = {2020}, note = {ISSN: 18650937}, keywords = {Currency recognition, Deep feature extraction, Deep learning}, pages = {273--281}, }
@article{Pham2020, title = {Comparison of the {Framingham} {Risk} {Score} and {Deep} {Neural} {Network}-{Based} {Coronary} {Heart} {Disease} {Risk} {Prediction}}, volume = {156}, issn = {21903026}, doi = {10.1007/978-981-13-9714-1_30}, abstract = {Coronary heart disease (CHD) is one of the top causes of death globally; if suffering from CHD, long time permanent treatments are required. Furthermore, the early detection of CHD is not easy; doctors diagnose it based on many kinds of clinical tests. Therefore, it is effective to reduce the risks of developing CHD by predicting high-risk people who will suffer from CHD. The Framingham Risk Score (FRS) is a gender-specific algorithm used to estimate at 10-years CHD risk of an individual. However, FRS cannot well estimate risk in populations other than the US population. In this study, we have proposed a deep neural network (DNN); this approach has been compared with the FRS and data mining-based CHD risk prediction models in the Korean population. As a result of our experiment, models using data mining have given higher accuracy than FRS-based prediction. Moreover, the proposed DNN has shown the highest accuracy and area under the curve (AUC) score, 82.67\%, and 82.64\%, respectively.}, journal = {Smart Innovation, Systems and Technologies}, author = {Amarbayasgalan, Tsatsral and Van Huy, Pham and Ryu, Keun Ho}, year = {2020}, note = {ISBN: 9789811397134}, keywords = {Coronary heart disease, Data mining, Deep neural network, Framingham risk score}, pages = {273--280}, }
@article{Pham2020, title = {Xgboost-based framework for smoking-induced noncommunicable disease prediction}, volume = {17}, issn = {16604601}, doi = {10.3390/ijerph17186513}, abstract = {Smoking-induced noncommunicable diseases (SiNCDs) have become a significant threat to public health and cause of death globally. In the last decade, numerous studies have been proposed using artificial intelligence techniques to predict the risk of developing SiNCDs. However, determining the most significant features and developing interpretable models are rather challenging in such systems. In this study, we propose an efficient extreme gradient boosting (XGBoost) based framework incorporated with the hybrid feature selection (HFS) method for SiNCDs prediction among the general population in South Korea and the United States. Initially, HFS is performed in three stages: (I) significant features are selected by t-test and chi-square test; (II) multicollinearity analysis serves to obtain dissimilar features; (III) final selection of best representative features is done based on least absolute shrinkage and selection operator (LASSO). Then, selected features are fed into the XGBoost predictive model. The experimental results show that our proposed model outperforms several existing baseline models. In addition, the proposed model also provides important features in order to enhance the interpretability of the SiNCDs prediction model. Consequently, the XGBoost based framework is expected to contribute for early diagnosis and prevention of the SiNCDs in public health concerns.}, number = {18}, journal = {International Journal of Environmental Research and Public Health}, author = {Davagdorj, Khishigsuren and Pham, Van Huy and Theera-Umpon, Nipon and Ryu, Keun Ho}, year = {2020}, pmid = {32906777}, keywords = {Extreme gradient boosting, Feature selection, Noncommunicable disease, Smoking}, pages = {1--22}, }
@article{Pham2020, title = {Synthetic oversampling based decision support framework to solve class imbalance problem in smoking cessation program}, volume = {17}, issn = {17277841}, doi = {10.6703/IJASE.202009_17(3).223}, abstract = {Smoking is one of the significant avoidable risk factors for premature death. Most smokers make multiple quit attempts during their lifetime but smoking dependence is not easy and many people eventually failed quit attempts. Predicting the likelihood of success in smoking cessation program is necessary for public health. In recent years, a few numbers of decision support systems have been developed for dealing with smoking cessation based on machine learning techniques. However, the class imbalance problem is increasingly recognized as serious in real-world applications. Therefore, this paper presents a synthetic minority over-sampling technique (SMOTE) based decision support framework in order to predict the success of smoking cessation program using Korea National Health and Nutrition Examination Survey (KNHANES) dataset. We carried out experiments as follows: I) the unnecessary instances and variables have been eliminated, II) then we employed three variations of SMOTE, III) also the prediction models have been constructed. Finally, compare the prediction models to obtain the best model. Our experimental results showed that SMOTE improved the prediction performance of machine learning classifiers among evaluation metrics. Moreover, SMOTE regular based Random Forest (RF) and Naïve Bayes (NB) classifiers were determined the best prediction models in real-world smoking cessation dataset. 
Consequently, our decision support framework can interpret the important risk factors of smoking cessation using multivariate regression analysis.}, number = {3}, journal = {International Journal of Applied Science and Engineering}, author = {Davagdorj, Khishigsuren and Lee, Jong Seol and Park, Kwang Ho and Van Huy, Pham and Ryu, Keun Ho}, year = {2020}, keywords = {Class imbalance, Machine learning classifiers, Risk factor analysis, Smoking cessation, Synthetic minority oversampling}, pages = {223--235}, }
@article{Pham2020, title = {Class-{Incremental} {Learning} with {Deep} {Generative} {Feature} {Replay} for {DNA} {Methylation}-{Based} {Cancer} {Classification}}, volume = {8}, issn = {21693536}, doi = {10.1109/ACCESS.2020.3039624}, abstract = {Developing lifelong learning algorithms are mandatory for computational systems biology. Recently, many studies have shown how to extract biologically relevant information from high-dimensional data to understand the complexity of cancer by taking the benefit of deep learning (DL). Unfortunately, new cancer growing up into the hundred types that make systems difficult to classify them efficiently. In contrast, the current state-of-the-art continual learning (CL) methods are not designed for the dynamic characteristics of high-dimensional data. And data security and privacy are some of the main issues in the biomedical field. This article addresses three practical challenges for class-incremental learning (Class-IL) such as data privacy, high-dimensionality, and incremental learning problems. To solve this, we propose a novel continual learning approach, called Deep Generative Feature Replay (DGFR), for cancer classification tasks. DGFR consists of an incremental feature selection (IFS) and a scholar network (SN). IFS is used for selecting the most significant CpG sites from high-dimensional data. We investigate different dimensions to find an optimal number of selected CpG sites. SN employs a deep generative model for generating pseudo data without accessing past samples and a neural network classifier for predicting cancer types. We use a variational autoencoder (VAE), which has been successfully applied to this research field in previous works. All networks are sequentially trained on multiple tasks in the Class-IL setting. We evaluated the proposed method on the publicly available DNA methylation data. 
The experimental results show that the proposed DGFR achieves a significantly superior quality of cancer classification tasks with various state-of-the-art methods in terms of accuracy.}, journal = {IEEE Access}, author = {Batbaatar, Erdenebileg and Park, Kwang Ho and Amarbayasgalan, Tsatsral and Davagdorj, Khishigsuren and Munkhdalai, Lkhagvadorj and Pham, Van Huy and Ryu, Keun Ho}, year = {2020}, keywords = {Computational biology, DNA methylation, cancer classification, class-incremental learning, continual learning, deep generative model, deep learning, variational autoencoder}, pages = {210800--210815}, }
@article{Batbaatar2020mtta, title = {Multi-task topic analysis framework for hallmarks of cancer with weak supervision}, volume = {10}, issn = {20763417}, doi = {10.3390/app10030834}, abstract = {The hallmarks of cancer represent an essential concept for discovering novel knowledge about cancer and for extracting the complexity of cancer. Due to the lack of topic analysis frameworks optimized specifically for cancer data, the studies on topic modeling in cancer research still have a strong challenge. Recently, deep learning (DL) based approaches were successfully employed to learn semantic and contextual information from scientific documents using word embeddings according to the hallmarks of cancer (HoC). However, those are only applicable to labeled data. There is a comparatively small number of documents that are labeled by experts. In the real world, there is a massive number of unlabeled documents that are available online. In this paper, we present a multi-task topic analysis (MTTA) framework to analyze cancer hallmark-specific topics from documents. The MTTA framework consists of three main subtasks: (1) cancer hallmark learning (CHL)-used to learn cancer hallmarks on existing labeled documents; (2) weak label propagation (WLP)-used to classify a large number of unlabeled documents with the pre-trained model in the CHL task; and (3) topic modeling (ToM)-used to discover topics for each hallmark category. In the CHL task, we employed a convolutional neural network (CNN) with pre-trained word embedding that represents semantic meanings obtained from an unlabeled large corpus. In the ToM task, we employed a latent topic model such as latent Dirichlet allocation (LDA) and probabilistic latent semantic analysis (PLSA) model to catch the semantic information learned by the CNN model for topic analysis. To evaluate the MTTA framework, we collected a large number of documents related to lung cancer in a case study. 
We also conducted a comprehensive performance evaluation for the MTTA framework, comparing it with several approaches.}, number = {3}, journal = {Applied Sciences (Switzerland)}, author = {Batbaatar, Erdenebileg and Pham, Van Huy and Ryu, Keun Ho}, year = {2020}, keywords = {Biomedical domain, Cancer hallmark, Convolutional neural network, Latent semantic learning, Lung cancer, Multi-task learning, Semantic learning, Topic analysis}, }
@article{Pham2020, title = {A comparative analysis of machine learning methods for class imbalance in a smoking cessation intervention}, volume = {10}, issn = {20763417}, doi = {10.3390/app10093307}, abstract = {Smoking is one of the major public health issues, which has a significant impact on premature death. In recent years, numerous decision support systems have been developed to deal with smoking cessation based on machine learning methods. However, the inevitable class imbalance is considered a major challenge in deploying such systems. In this paper, we study an empirical comparison of machine learning techniques to deal with the class imbalance problem in the prediction of smoking cessation intervention among the Korean population. For the class imbalance problem, the objective of this paper is to improve the prediction performance based on the utilization of synthetic oversampling techniques, which we called the synthetic minority over-sampling technique (SMOTE) and an adaptive synthetic (ADASYN). This has been achieved by the experimental design, which comprises three components. First, the selection of the best representative features is performed in two phases: the lasso method and multicollinearity analysis. Second, generate the newly balanced data utilizing SMOTE and ADASYN technique. Third, machine learning classifiers are applied to construct the prediction models among all subjects and each gender. In order to justify the effectiveness of the prediction models, the f-score, type I error, type II error, balanced accuracy and geometric mean indices are used. Comprehensive analysis demonstrates that Gradient Boosting Trees (GBT), Random Forest (RF) and multilayer perceptron neural network (MLP) classifiers achieved the best performances in all subjects and each gender when SMOTE and ADASYN were utilized. The SMOTE with GBT and RF models also provide feature importance scores that enhance the interpretability of the decision-support system. 
In addition, it is proven that the presented synthetic oversampling techniques with machine learning models outperformed baseline models in smoking cessation prediction.}, number = {9}, journal = {Applied Sciences (Switzerland)}, author = {Davagdorj, Khishigsuren and Lee, Jong Seol and Pham, Van Huy and Ryu, Keun Ho}, year = {2020}, keywords = {Class imbalance, Decision making, Feature importance, Machine learning, Smoking, Synthetic oversampling}, }
@article{Van_Huy_Pham_70120571, title = {A {Strongly} {Convergent} {Modified} {Halpern} {Subgradient} {Extragradient} {Method} for {Solving} the {Split} {Variational} {Inequality} {Problem}}, volume = {48}, issn = {23052228}, url = {http://doi.org/10.1007/s10013-019-00378-y}, doi = {10.1007/s10013-019-00378-y}, abstract = {We propose a method for solving the split variational inequality problem (SVIP) involving Lipschitz continuous and pseudomonotone mappings. The proposed method is inspired by the Halpern subgradient extragradient method for solving the monotone variational inequality problem with a simple step size. A strong convergence theorem for an algorithm for solving such a SVIP is proved without the knowledge of the Lipschitz constants of the mappings. As a consequence, we get a strongly convergent algorithm for finding the solution of the split feasibility problem (SFP), which requires only two projections at each iteration step. A simple numerical example is given to illustrate the proposed algorithm.}, number = {1}, journal = {Vietnam Journal of Mathematics}, author = {Van Huy, Pham and Hien, Nguyen Duc and Anh, Tran Viet}, year = {2020}, keywords = {Halpern subgradient extragradient method, Pseudomonotone mapping, Split feasibility problem, Split variational inequality problem, Strong convergence}, pages = {187--204}, }
@article{Pham2020, title = {Inverse anti-k-centrum problem on networks with variable edge lengths}, volume = {24}, issn = {10275487}, doi = {10.11650/tjm/190602}, abstract = {This paper concerns the problem of modifying edge lengths of a network at minimum total costs so as to make a prespecified vertex become an optimal location in the modified environment. Here, we focus on the ordered median objective function with respect to the vector of multipliers λ = (1, …, 1, 0, …, 0) with k 1’s. This problem is called the inverse anti-k-centrum problem. We first show that the inverse anti-k-centrum problem is NP-hard even on tree networks. However, for the inverse anti-k-centrum problem on cycles, we formulate it as one or two linear programs, depending on odd or even integer k. Concerning the special cases with k = 2, 3, M, we develop combinatorial algorithms that efficiently solve the problem, where M is the number of vertices of the cycle.}, number = {2}, journal = {Taiwanese Journal of Mathematics}, author = {Pham, Van Huy and Nguyen, Kien Trung}, year = {2020}, keywords = {Anti-k-centrum, Cycle, Inverse optimization problems, Location problems, Ordered median function, Tree}, pages = {501--522}, }
@article{Pham2020, title = {Unsupervised anomaly detection approach for time-series in multi-domains using deep reconstruction error}, volume = {12}, issn = {20738994}, doi = {10.3390/SYM12081251}, abstract = {Automatic anomaly detection for time-series is critical in a variety of real-world domains such as fraud detection, fault diagnosis, and patient monitoring. Current anomaly detection methods detect the remarkably low proportion of the actual abnormalities correctly. Furthermore, most of the datasets do not provide data labels, and require unsupervised approaches. By focusing on these problems, we propose a novel deep learning-based unsupervised anomaly detection approach (RE-ADTS) for time-series data, which can be applicable to batch and real-time anomaly detections. RE-ADTS consists of two modules including the time-series reconstructor and anomaly detector. The time-series reconstructor module uses the autoregressive (AR) model to find an optimal window width and prepares the subsequences for further analysis according to the width. Then, it uses a deep autoencoder (AE) model to learn the data distribution, which is then used to reconstruct a time-series close to the normal. For anomalies, their reconstruction error (RE) was higher than that of the normal data. As a result of this module, RE and compressed representation of the subsequences were estimated. Later, the anomaly detector module defines the corresponding time-series as normal or an anomaly using a RE based anomaly threshold. For batch anomaly detection, the combination of the density-based clustering technique and anomaly threshold is employed. In the case of real-time anomaly detection, only the anomaly threshold is used without the clustering process. We conducted two types of experiments on a total of 52 publicly available time-series benchmark datasets for the batch and real-time anomaly detections. 
Experimental results show that the proposed RE-ADTS outperformed the state-of-the-art publicly available anomaly detection methods in most cases.}, number = {8}, journal = {Symmetry}, author = {Amarbayasgalan, Tsatsral and Pham, Van Huy and Theera-Umpon, Nipon and Ryu, Keun Ho}, year = {2020}, keywords = {Anomaly detection, Deep autoencoder, Density-based clustering, Reconstruction error, Unsupervised technique}, }
@article{Pham2020, title = {Mobile application framework for {IobT} hydrogen skin moisturizing}, volume = {830}, issn = {1860949X}, doi = {10.1007/978-3-030-14132-5_6}, abstract = {In this paper, we developed mobile application framework which manages the hydrogen skin moisturizing device. We use react native framework for developing the mobile application. It allows us to manage and control the hydrogen skin device. Our hydrogen skin moisturizing device is based on PCB boards. We were connecting to PCB boards by Bluetooth communication methods. React native is a framework that provides the same results as a native application developed by a mobile application. It’s just enough to save time and money for converting the one written code into other platforms.}, journal = {Studies in Computational Intelligence}, author = {Jargalsaikhan, Bilguun and Kim, Ki Yong and Batbaatar, Erdenebileg and Park, Kwang Ho and Van Huy, Pham and Lee, Jong Yun and Ryu, Keun Ho}, year = {2020}, keywords = {Bluetooth communication method, Mobile application, PCB board, React native}, pages = {67--81}, }
@article{Pham2020, title = {{BPH} {Sensor} {Network} {Optimization} {Based} on {Cellular} {Automata} and {Honeycomb} {Structure}}, volume = {25}, issn = {15728153}, doi = {10.1007/s11036-019-01434-0}, abstract = {The brown planthopper (BPH) is a crucial pest of rice in tropical zones like the Mekong Delta of Vietnam. It economically causes severe loss to the rice harvest via direct nutritional depletion. Many studies address the BPH surveillance by using networks of wireless sensors that are mounted on light traps. However, these approaches have not been confirmed as effective deployment due to inoperative light traps’ locations. The problem is that the geographical area of towns is not identical, leading to unnecessary redundancy of sensors and light traps. Our aim in this article is to optimize the locations of BPH sensor networks by utilizing cellular automata and honeycomb architecture which have not been affected by the spatial characteristic geographically. The authors have made several contributions regarding the mentioned problem by (i) quantitatively proving that the deployment cost of BPH sensor networks is significantly reduced, and consequently (ii) optimizing the BPH sensor network. Therefore, the appropriate configuration of the network is maintained in any circumstances. The experiments have been performed on BPH surveillance networks in Hau Giang, a substantial rice province in the Mekong Delta of Vietnam.}, number = {3}, journal = {Mobile Networks and Applications}, author = {Huynh, Hiep Xuan and Dang, Huy Quang and Luong, Huong Hoang and Ong, Linh My Thi and Duong-Trung, Nghia and Huynh, Toan Phung and Pham, Van Huy and Pottier, Bernard}, year = {2020}, keywords = {Cellular automata, Honeycomb, Light traps, Sensor network}, pages = {1140--1150}, }
@article{Nguyen2020swd, title = {Source-{Word} decomposition for neural machine translation}, volume = {2020}, issn = {15635147}, doi = {10.1155/2020/4795187}, abstract = {End-to-end neural machine translation does not require us to have specialized knowledge of investigated language pairs in building an effective system. On the other hand, feature engineering proves to be vital in other artificial intelligence fields, such as speech recognition and computer vision. Inspired by works in those fields, in this paper, we propose a novel feature-based translation model by modifying the state-of-the-art transformer model. Specifically, the encoder of the modified transformer model takes input combinations of linguistic features comprising of lemma, dependency label, part-of-speech tag, and morphological label instead of source words. The experiment results for the Russian-Vietnamese language pair show that the proposed feature-based transformer model improves over the strongest baseline transformer translation model by impressive 4.83 BLEU. In addition, experiment analysis reveals that human judgment on the translation results strongly confirms machine judgment. Our model could be useful in building translation systems translating from a highly inflectional language into a noninflectional language.}, journal = {Mathematical Problems in Engineering}, author = {Nguyen, Thien and Le, Hoai and Pham, Van Huy}, year = {2020}, }
@article{Vo2019b, title = {Efficacy of electrochemically activated water solution in gingivitis treatment}, volume = {49}, issn = {20936214}, url = {http://dx.doi.org/10.1007/S40005-018-00419-7}, doi = {10.1007/s40005-018-00419-7}, abstract = {Mouthwash is one of the most commonly oral healthcare products for prevention and treatment of gum diseases. Dr. ECA is a disinfectant solution manufactured by a Vietnamese company and has been licensed for oral healthcare, but its effect on gingivitis is still unclear. To evaluate the effectiveness of this product in the treatment of gingivitis, we conducted a clinical study in 60 students diagnosed with gingivitis to compare the results of gingivitis treatment using Dr. ECA and a control mouthwash (a demonstrated anti-gingivitis product). The data showed that in the treated group, the rate of good Gingivitis Index (GI) before treatment was 46.67\%, but after 2 and 4 weeks of using Dr. ECA that rate was increased to 96.67\% and 100\%, respectively. The percentage of patients with bleeding on probing was 63.33\% but declined sharply after 2 weeks of treatment to 6.67\%, and further reduced to 3.33\% after 4 weeks of treatment. The treatment outcomes in were similar in the Dr. ECA treated group and control one with no statistically significant difference. Our results suggest that the electrolyte saline solution Dr. ECA is effective in treating gingivitis. Its effects are similar to other mouthwash products that had been approved and marketed.}, number = {3}, journal = {Journal of Pharmaceutical Investigation}, author = {Vo, Truong Nhu Ngoc and Chu, Dinh Toi and Duong, Duc Long and Bui, Van Nhon and Tong, Minh Son and Nguyen, Thi Thu Phuong and Le, Quynh Anh and Nguyen, Khanh Hoang and Pham, Van Huy and Chu-Dinh, Thien}, month = may, year = {2019}, note = {Publisher: Springer Science and Business Media LLC}, keywords = {Dr. ECA, Electrochemically activated water solutions, Gingivitis, Mouthwash}, pages = {323--329}, }
@article{Dung2019, title = {Author {Correction}: {Evaluation} of dental arch dimensions in 12 year-old {Vietnamese} children - {A} cross-sectional study of 4565 subjects ({Scientific} {Reports}, (2019), 9, 1, (3101), 10.1038/s41598-019-39710-4)}, volume = {9}, issn = {20452322}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85074625924%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.1038/s41598-019-53311-1}, abstract = {The original version of this Article contained an error in Affiliation 10, which was incorrectly given as ‘AI Lab, Faculty of Information Technology, Ton Duc Thang University, Ho Chi Minh City, Vietnam, Ho Chi Minh City, Vietnam’. The correct affiliation is listed below: AI Lab, Faculty of Information Technology, Ton Duc Thang University, Ho Chi Minh City, Vietnam This error has now been corrected in the HTML and PDF versions of the Article.}, number = {1}, journal = {Scientific Reports}, author = {Dung, Truong Manh and Ngoc, Vo Truong Nhu and Hiep, Nguyen Hung and Khoi, Truong Dinh and Xiem, Vu Van and Chu-Dinh, Thien and Cieslar-Pobuda, Artur and Stoufi, Eleana and Show, Pau Loke and Tao, Yang and Bac, Nguyen Duy and Ba, Nguyen Van and Le, Quynh Anh and Pham, Van Huy and Chu, Dinh Toi}, month = dec, year = {2019}, pmid = {31695133}, note = {Publisher: Nature Publishing Group}, }
@article{Nga2019, title = {School education and childhood obesity: {A} systemic review}, volume = {13}, issn = {18780334}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85068759626%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.1016/j.dsx.2019.07.014}, abstract = {Childhood obesity prevalence is shooting up at a phenomenal rate worldwide, leading to long-term devastating consequences. A great number of studies have investigated factors contributing to the increase in BMI of children and adolescents. School-based, home-based and clinic-based solutions have been suggested as possible viable strategies, among which school-based interventions is believed to produce a noticeable effect on a massive scale. However, the question of whether school interventions, especially school education exert significant impact on childhood obesity or not, is left with mixing results. This article aims to holistically review the relationship between school education and childhood obesity. Various factors are covered, including health education, nutrition education, school nutrition, physical education, teachers’ awareness, teaching practice and school stress, In all, school education is not the answer to childhood obesity but just part of it. More attempts from other stakeholders (parents, community, policy makers, researchers, etc.) should be made in order to solve this complicated puzzle.}, number = {4}, journal = {Diabetes and Metabolic Syndrome: Clinical Research and Reviews}, author = {Nga, Vu Thi and Dung, Vo Ngoc Thuy and Chu, Dinh Toi and Tien, Nguyen Le Bao and Van Thanh, Vo and Ngoc, Vo Truong Nhu and Hoan, Le Ngoc and Phuong, Nguyen Thi and Pham, Van Huy and Tao, Yang and Linh, Nguyen Phuong and Show, Pau Loke and Do, Duc Lan}, month = jul, year = {2019}, pmid = {31405667}, note = {Publisher: Elsevier Ltd}, keywords = {Childhood obesity, School education, School-based}, pages = {2495--2501}, }
@article{Van_Huy_Pham_76157356, title = {Prevalence of dental fear and its relationship with primary dental caries in 7-year-old-children}, volume = {29}, issn = {18803997}, url = {http://doi.org/10.1016/J.PDJ.2019.04.002}, doi = {10.1016/j.pdj.2019.04.002}, abstract = {Background: Child's dental fear has been reported as one of the reasons that increase, aggravate dental diseases and facilitate other oral diseases. This study is aimed to describe the type and prevalence of dental fear and to assess the relationship between cavities of primary teeth and dental fear in 7-year-old children at Phulam Primary School, Hanoi. Methods: The sample comprised of 132 children aged 7 years. The questionnaire examined the profile of participants and assessed their dental fear using the Children's Fear Survey Schedule-Dental Subscale (CFSS-DS). Children have “dental fear” when the total CFSS-DS score is greater than or equal to 38. By contrast, those without dental fear gain the total point which is less than 38. After completing the questionnaire, a dental examination was undertaken according to the International Caries Detection and Assessment System (ICDAS). Results: The prevalence of dental fear was 34.85\%. Fear scores were highest for “Dentist drilling” (2.92 ± 1.47) and “Injections” (2.87 ± 1.53). In the univariate analysis, the odds of girls having dental fear were approximately equal to boys (OR = 0.98, 95\% CI = −0.75-0.70). The odds of only children having dental fear were 1.6 times higher than others, but this was not significant. Dental fear was found to be not associated with sex, birth order and primary dental caries (p {\textgreater} 0.05). 
Conclusion: Our findings demonstrated the status of 7-year-old children's dental fear at Phulam primary school, and found that primary dental caries had no correlation with child dental fear score.}, number = {2}, journal = {Pediatric Dental Journal}, author = {Son, Tong Minh and Nhu Ngoc, Vo Truong and Tran, Phung Thi and Nguyen, Nga Phuong and Luong, Hang Minh and Nguyen, Ha Thu and Sharma, Kulbhushan and Van Tu, Pham and Ha, Luu Song and Ha, Vu Ngoc and Van Huy, Pham and Thimiri Govinda Raj, Deepak B. and Chu, Dinh Toi}, year = {2019}, keywords = {Children, Dental fear, Primary dental caries}, pages = {84--89}, }
@article{Pham2019, title = {The effects of adipocytes on the regulation of breast cancer in the tumor microenvironment: {An} update}, volume = {8}, issn = {20734409}, doi = {10.3390/cells8080857}, abstract = {Obesity is a global pandemic and it is well evident that obesity is associated with the development of many disorders including many cancer types. Breast cancer is one of that associated with a high mortality rate. Adipocytes, a major cellular component in adipose tissue, are dysfunctional during obesity and also known to promote breast cancer development both in vitro and in vivo. Dysfunctional adipocytes can release metabolic substrates, adipokines, and cytokines, which promote proliferation, progression, invasion, and migration of breast cancer cells. The secretion of adipocytes can alter gene expression profile, induce inflammation and hypoxia, as well as inhibit apoptosis. It is known that excessive free fatty acids, cholesterol, triglycerides, hormones, leptin, interleukins, and chemokines upregulate breast cancer development. Interestingly, adiponectin is the only adipokine that has anti-tumor properties. Moreover, adipocytes are also related to chemotherapeutic resistance, resulting in the poorer outcome of treatment and advanced stages in breast cancer. Evaluation of the adipocyte secretion levels in the circulation can be useful for prognosis and evaluation of the effectiveness of cancer therapy in the patients. Therefore, understanding about functions of adipocytes as well as obesity in breast cancer may reveal novel targets that support the development of new anti-tumor therapy. 
In this systemic review, we summarize and update the effects of secreted factors by adipocytes on the regulation of breast cancer in the tumor microenvironment.}, number = {8}, journal = {Cells}, author = {Chu, Dinh Toi and Phuong, Thuy Nguyen Thi and Tien, Nguyen Le Bao and Tran, Dang Khoa and Nguyen, Tran Thuy and Thanh, Vo Van and Quang, Thuy Luu and Minh, Le Bui and Pham, Van Huy and Ngoc, Vo Truong Nhu and Kushekhar, Kushi and Chu-Dinh, Thien}, year = {2019}, pmid = {31398937}, keywords = {Adipocytes, Adipokines, Breast cancer, Hormones, Obesity, Tumor microenvironment}, }
@article{Nga2019a, title = {Zoonotic diseases from birds to humans in {Vietnam}: possible diseases and their associated risk factors}, volume = {38}, issn = {14354373}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85062623025%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.1007/s10096-019-03505-2}, abstract = {In recent decades, exceeding 60\% of infectious cases in human beings are originated from pathogenic agents related to feral or companion animals. This figure continues to swiftly increase due to excessive exposure between human and contaminated hosts by means of applying unhygienic farming practices throughout society. In Asia countries—renowned for lax regulation towards animal-trading markets—have experienced tremendous outbreaks of zoonotic diseases every year. Meanwhile, various epidemic surges were first reported in the residential area of China—one of the largest distributor of all animal products on the planet. Some noticeable illnesses comprising of A/H5N1 or H7N9—known as avian influenza which transmitted from poultry and also wild birds—have caused inevitable disquiet among inhabitants. Indeed, poultry farming industry in China has witnessed dynamic evolution for the past two decades, both in quantity and degree of output per individual. Together with this pervasive expansion, zoonotic diseases from poultry have incessantly emerged as a latent threat to the surrounding residents in entire Asia and also European countries. Without strict exporting legislation, Vietnam is now facing the serious problem in terms of poultry distribution between the two countries’ border. Even though several disease investigations have been conducted by many researchers, the disease epidemiology or transmission methods among people remained blurred and need to be further elucidated. 
In this paper, our aim is to provide a laconic review of common zoonotic diseases spread in Vietnam, outstanding cases and several factors predisposing to this alarming situation.}, number = {6}, journal = {European Journal of Clinical Microbiology and Infectious Diseases}, author = {Nga, Vu Thi and Ngoc, Tran Uyen and Minh, Le Bui and Ngoc, Vo Truong Nhu and Pham, Van Huy and Nghia, Le Long and Son, Nguyen Lan Hung and Van Pham, Thi Hong and Bac, Nguyen Duy and Tien, Tran Viet and Tuan, Nguyen Ngoc Minh and Tao, Yang and Show, Pau Loke and Chu, Dinh Toi}, month = jun, year = {2019}, pmid = {30806904}, note = {Publisher: Springer Verlag}, keywords = {Birds, Human, Review, Risk factors, Vietnam, Zoonotic diseases}, pages = {1047--1058}, }
@article{Chu2019b, title = {Socioeconomic inequalities in the {HIV} testing during antenatal care in vietnamese women}, volume = {16}, issn = {16604601}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85071737298%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.3390/ijerph16183240}, abstract = {Although HIV (human immunodeficiency virus) testing for all women has been promoted by Vietnam’s Ministry of Health since 2000, test acceptance rates in this country were reported to be less than 30\% in the community. This country has been facing the barriers to approach the national services towards transmission prevention from mother to child including HIV testing during antenatal care (ANC) towards mothers. Here, we aim to assess the socioeconomic inequalities in HIV testing during ANC among Vietnamese women. This study used available data from the Vietnam Multiple Indicator Cluster Survey 2014. Overall, the prevalence of HIV testing during antenatal care was 30\% and the concentrate index (CCI) was 0.1926. There was significant inequality between women classified as poor and rich, and when stratified by social characteristics, inequality was found in women aged 15–49 years (CCI: 0.4), living in rural areas (CCI: 0.3), belonging to ethnic minorities (CCI: 0.5) and having primary or less education (CCI: 0.4). In the multivariate logistic regression analysis, ethnicity and socioeconomic status were significant factors associated with HIV testing during ANC. 
We found the prevalence of HIV testing during ANC was low, and its inequalities were associated with age, living area, ethnicity, education, and economic status.}, number = {18}, journal = {International Journal of Environmental Research and Public Health}, author = {Chu, Dinh Toi and Vo, Hoang Long and Tran, Dang Khoa and Si Anh, Hao Nguyen and Hoang, Long Bao and Nhu, Phong Tran and Ngoc, Khanh Nguyen and Nguyen, Trang Thu and Van, Quyet Pham and Tien, Nguyen Le Bao and Thanh, Vo Van and Nga, Vu Thi and Quang, Thuy Luu and Minh, Le Bui and Pham, Van Huy}, month = sep, year = {2019}, pmid = {31487845}, note = {Publisher: MDPI AG}, keywords = {Ethnicity, HIV testing, Pregnancy, Socioeconomic inequalities, Vietnamese women}, }
@article{Dung2019a, title = {Evaluation of dental arch dimensions in 12 year-old {Vietnamese} children - {A} cross-sectional study of 4565 subjects}, volume = {9}, issn = {20452322}, url = {http://www.nature.com/articles/s41598-019-39710-4}, doi = {10.1038/s41598-019-39710-4}, abstract = {This study aimed to define the width and length of the dental arch in 12-year-old Vietnamese children, and to elucidate differences between genders and among ethnic groups. A cross-sectional study was conducted in 4565 12 years-old children from the 4 major ethnic groups in Vietnam (Kinh, Muong, Thai, and Tay), with a healthy and full set of 28 permanent teeth that had never had any orthodontic treatment and with no reconstructive materials at the measured points. The mean variables in all subjects were 36.39 mm for upper inter-canine width; 46.88 mm for upper inter-first molar width; 59.43 mm for upper inter-second molar width; 10.41 mm for upper anterior length; 32.15 mm for upper posterior length 1; 45.52 mm for upper posterior length 2; 28.31 mm for lower inter-canine width; 41.63 mm for lower inter-first molar width; 54.57 mm for lower inter-second molar width (LM2W); 7.06 mm for lower anterior length (LAL); 26.87 mm for lower posterior length 1 (LP1L); and 41.29 mm for lower posterior length 2. Significant differences in these parameters between genders were found in all ethnic groups, except for LAL in the Kinh and Thai groups, and LP1L in the Tay group. Significant ethnic differences were also found in almost all parameters except LM2W in both males and females. Taken together, the representative sizes of dental arches of 12-year-old Vietnamese children have been defined. 
Our data indicate that there are some variations in dental arch dimensions among ethnic groups and between genders.}, number = {1}, journal = {Scientific Reports}, author = {Dung, Truong Manh and Ngoc, Vo Truong Nhu and Hiep, Nguyen Hung and Khoi, Truong Dinh and Xiem, Vu Van and Chu-Dinh, Thien and Cieslar-Pobuda, Artur and Stoufi, Eleana and Show, Pau Loke and Tao, Yang and Bac, Nguyen Duy and Van Ba, Nguyen and Le, Quynh Anh and Pham, Van Huy and Chu, Dinh Toi}, month = dec, year = {2019}, pmid = {30816230}, note = {Publisher: Nature Publishing Group}, pages = {3101}, }
@incollection{Vo_2019, title = {Video-{Based} {Vietnamese} {Sign} {Language} {Recognition} {Using} {Local} {Descriptors}}, volume = {11432 LNAI}, isbn = {978-3-030-14801-0}, url = {http://dx.doi.org/10.1007/978-3-030-14802-7_59}, abstract = {Sign Language is one of the methods for non-verbal communication. It is most commonly used by deaf or dumb people who have hearing or speech problems to communicate among themselves or with normal people. Vietnamese Sign Language (VSL) is a sign language system used in the community of Vietnamese hearing impaired individuals. VSL recognition aims to develop algorithms and methods to correctly identify a sequence of produced signs and to understand their meaning in Vietnamese. However, automatic VSL recognition in video has many challenges due to the orientation of camera, hand position and movement, inter hand relation, etc. In this paper, we present some feature extraction approaches for VSL recognition including spatial feature, scene-based feature, and especially motion-based feature. Instead of relying on a static image, we specifically capture motion information between frames in a video sequence. We evaluated the proposed framework on our acquired VSL dataset including 23 alphabets, 3 diacritic marks and 5 tones in Vietnamese language with 2D camera. Additionally, in order to gain more information of hand movement and hand position, we also used the data augmentation technique. All these helpful information would contribute to an effective VSL recognition system. The experiments achieved the satisfactory results with 86.61\%. It indicates that data augmentation technique provides more information about the orientation of hand. 
Moreover, the combination of spatial, scene and especially motion information could help the system to be able to capture information from both single frame and from multiple frames, and thus the performance of VSL recognition system could be improved.}, booktitle = {Lecture {Notes} in {Computer} {Science} (including subseries {Lecture} {Notes} in {Artificial} {Intelligence} and {Lecture} {Notes} in {Bioinformatics})}, publisher = {Springer International Publishing}, author = {Vo, Anh H. and Nguyen, Nhu T.Q. and Nguyen, Ngan T.B. and Pham, Van Huy and Van Giap, Ta and Nguyen, Bao T.}, year = {2019}, doi = {10.1007/978-3-030-14802-7_59}, note = {ISSN: 16113349}, keywords = {Local descriptors, Motion-based feature, Scene-based feature, Spatial feature, VSL recognition, Vietnamese Sign Language (VSL)}, pages = {680--693}, }
@article{Dinh2019, title = {The effects of green tea on lipid metabolism and its potential applications for obesity and related metabolic disorders - {An} existing update}, volume = {13}, issn = {18780334}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85063332896%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.1016/j.dsx.2019.03.021}, abstract = {Obesity is one of the top global issues, which induces several serious health consequences both physically and mentally, such as type 2 diabetes, cardiovascular diseases, dyslipidemia, eating disorders, depression and stress. However, the effective therapy to prevent and treat obesity and overweight, up to now, cannot be found nowadays. Several methods/medicines namely diet control, energy balance, environmental changes, genetic and stem cell therapies, new drugs/chemicals have been extensively studied to enhance the ability to control bodyweight and prevent obesity. Of all the aforementioned methods, green tea, used as a daily beverage, has shown beneficial impacts for the health, especially its anti-obesity effects. Available evidence shows that green tea can interrupt lipid emulsification, reduce adipocyte differentiation, increase thermogenesis, and reduce food intake, thus green tea improves the systemic metabolism and decreases fat mass. Here, we highlight and sum up the update investigations of anti-obesity effect of green tea as well as discuss the potential application of them for preventing obesity and its related metabolic disorders.}, number = {2}, journal = {Diabetes and Metabolic Syndrome: Clinical Research and Reviews}, author = {Dinh, Thien Chu and Thi Phuong, Thuy Nguyen and Minh, Le Bui and Minh Thuc, Vu Thi and Bac, Nguyen Duy and Van Tien, Nguyen and Pham, Van Huy and Show, Pau Loke and Tao, Yang and Nhu Ngoc, Vo Truong and Bich Ngoc, Nguyen Thi and Jurgoński, Adam and Thimiri Govinda Raj, Deepak B. 
and Van Tu, Pham and Ha, Vu Ngoc and Czarzasta, Joanna and Chu, Dinh Toi}, month = mar, year = {2019}, pmid = {31336539}, note = {Publisher: Elsevier Ltd}, keywords = {Adipogenesis, Green tea, Lipid metabolism, Obesity and metabolic disorders}, pages = {1667--1673}, }
@article{Dinh2019a, title = {Zika virus in {Vietnam}, {Laos}, and {Cambodia}: are there health risks for travelers?}, volume = {38}, issn = {14354373}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85065255714%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.1007/s10096-019-03563-6}, abstract = {Vietnam, Laos, and Cambodia have reported first cases of Zika virus (ZIKV) infection since 2010 (Cambodia) and 2016 (Vietnam and Laos). One case of ZIKV-related microcephaly was recognized among a hundred infected cases in these areas, raising a great concern about the health risk related to this virus infection. At least 5 cases of ZIKV infection among travelers to Vietnam, Laos, and Cambodia were recorded. It is noticeable that ZIKV in these areas can cause birth defects. This work aims to discuss the current epidemics of ZIKV in Vietnam, Laos, and Cambodia and update the infection risk of ZIKV for travelers to these areas.}, number = {9}, journal = {European Journal of Clinical Microbiology and Infectious Diseases}, author = {Dinh, Thien Chu and Bac, Nguyen Duy and Minh, Le Bui and Ngoc, Vo Truong Nhu and Pham, Van Huy and Vo, Hoang Long and Tien, Nguyen Le Bao and Van Thanh, Vo and Tao, Yang and Show, Pau Loke and Chu, Dinh Toi}, month = sep, year = {2019}, pmid = {31044332}, note = {Publisher: Springer Verlag}, keywords = {Cambodia, Laos, Travelers, Vietnam, Zika virus infection, Zika virus–infected travelers}, pages = {1585--1590}, }
@article{Chu2019a, title = {The possible zoonotic diseases transferring from pig to human in {Vietnam}}, volume = {38}, issn = {14354373}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85060733832%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.1007/s10096-018-03466-y}, abstract = {Southeast Asia is considered one of worldwide hotspots consisting many distinct zoonotic infections. With optimal condition for the development of various pathogens, Vietnam is facing serious risks of zoonotic diseases. Besides, more than 50\% Vietnamese people settle in rustic areas and earn their livings through small-scale animal breeding. It is possible that zoonotic diseases can be easily spread to the population by close contact with the infected animals, their infected residues, contaminated water, soil, or other possible means of transmission. In fact, zoonotic infections—transmissible infections between vertebrate animals and humans—cover a wide range of diseases with distinctive clinical and epidemiological highlights. With insufficient understanding and swift alteration in toxicity of the pathogens, these infections have gained more concerns due to sophisticated routes of transmission and harmful threats to humans. Recently emerging viral diseases exerted potential dangers to human beings, which required many countries to impose immediate actions to prevent any complications. Vietnam has recorded several cases of zoonotic diseases, especially pig-related illnesses; however, the studies on these diseases in this country remain limited. 
This work aims to highlight the zoonotic diseases transferring from pigs to humans and discuss risk factors of these diseases in Vietnam.}, number = {6}, journal = {European Journal of Clinical Microbiology and Infectious Diseases}, author = {Chu, Dinh Toi and Ngoc, Tran Uyen and Chu-Dinh, Thien and Ngoc, Vo Truong Nhu and Van Nhon, Bui and Pham, Van Huy and Nghia, Le Long and Anh, Le Quynh and Van Pham, Thi Hong and Truong, Nguyen Duc}, month = jun, year = {2019}, pmid = {30680568}, note = {Publisher: Springer Verlag}, keywords = {Emerging diseases, Pig, Vietnam, Zoonotic diseases, Zoonotic infections}, pages = {1003--1014}, }
@article{Chu2019c, title = {An update on obesity: {Mental} consequences and psychological interventions}, volume = {13}, issn = {18780334}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85052964626%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.1016/j.dsx.2018.07.015}, abstract = {Besides physical consequences, obesity has negative psychological effects, thereby lowering human life quality. Major psychological consequences of this disorder includes depression, impaired body image, low self-esteem, eating disorders, stress and poor quality of life, which are correlated with age and gender. Physical interventions, mainly diet control and energy balance, have been widely applied to treat obesity; and some psychological interventions including behavioral therapy, cognitive behavioral therapy and hypnotherapy have showed some effects on obesity treatment. Other psychological therapies, such as relaxation and psychodynamic therapies, are paid less attention. This review aims to update scientific evidence regarding the mental consequences and psychological interventions for obesity.}, number = {1}, journal = {Diabetes and Metabolic Syndrome: Clinical Research and Reviews}, author = {Chu, Dinh Toi and Minh Nguyet, Nguyen Thi and Nga, Vu Thi and Thai Lien, Nguyen Vu and Vo, Duc Duy and Lien, Nguyen and Nhu Ngoc, Vo Truong and Son, Le Hoang and Le, Duc Hau and Nga, Vu Bich and Van Tu, Pham and Van To, Ta and Ha, Luu Song and Tao, Yang and Pham, Van Huy}, month = jan, year = {2019}, pmid = {30641689}, note = {Publisher: Elsevier Ltd}, keywords = {Obesity, Overweight, Psychological consequences, Psychological interventions}, pages = {155--160}, }
@article{Van_Huy_Pham_76157355, title = {Adipose tissue stem cells for therapy: {An} update on the progress of isolation, culture, storage, and clinical application}, volume = {8}, issn = {20770383}, url = {http://doi.org/10.3390/JCM8070917}, doi = {10.3390/jcm8070917}, abstract = {Adipose tissue stem cells (ASCs), known as multipotent stem cells, are most commonly used in the clinical applications in recent years. Adipose tissues (AT) have the advantage in the harvesting, isolation, and expansion of ASCs, especially an abundant amount of stem cells compared to bone marrow. ASCs can be found in stromal vascular fractions (SVF) which are easily obtained from the dissociation of adipose tissue. Both SVFs and culture-expanded ASCs exhibit the stem cell characteristics such as differentiation into multiple cell types, regeneration, and immune regulators. Therefore, SVFs and ASCs have been researched to evaluate the safety and benefits for human use. In fact, the number of clinical trials on ASCs is going to increase by years; however, most trials are in phase I and II, and lack phase III and IV. This systemic review highlights and updates the process of the harvesting, characteristics, isolation, culture, storage, and application of ASCs, as well as provides further directions on the therapeutic use of ASCs.}, number = {7}, journal = {Journal of Clinical Medicine}, author = {Chu, Dinh Toi and Phuong, Thuy Nguyen Thi and Tien, Nguyen Le Bao and Tran, Dang Khoa and Minh, Le Bui and Thanh, Vo Van and Anh, Pham Gia and Pham, Van Huy and Nga, Vu Thi}, year = {2019}, keywords = {Adipose tissue stem cells, Clinical application, Culture, Isolation, Stem cell therapy, Storage}, }
@article{Pham2019d, title = {Scalable local features and hybrid classifiers for improving action recognition}, volume = {36}, issn = {18758967}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85064617467%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.3233/JIFS-181085}, abstract = {In recent years, action recognition techniques have played an increasingly important role in autonomous systems. However, the computational costs and precision of action recognition algorithms are still major challenges. Recently, a deep learning approach was proposed to obtain a higher accuracy, but large and deep neural networks have high computational costs. This paper presents a new approach that allows for a significant reduction in computational time while slightly increasing the accuracy. The contribution consists of two parts: a scalable feature extraction method (SFE) and a hybrid model of different classifiers. First, the SFE method is proposed for application to histogram orientation-based feature descriptors, such as the histogram of orientated gradient (HOG), histogram of optical flow (HOF), and the motion boundary histogram (MBH). An advantage of SFE is its ability to quickly compute features. Scalable feature extraction enables accurate approximation of features extracted from traditional image pyramids by efficiently using only the original image. Our method is inspired by a special data structure used for storing basic information of optical flow and image gradients, which are computed from the original image and then used to extract features across multiple scales of the feature region without recomputing the image gradients and optical flow. Second, we focus on a hybrid classification method based on a linear support vector machine (SVM) and hidden conditional random field (HCRF) model that improves the recognition precision. This effort shows that a combination of SVM and HCRF models provides a better accuracy than the traditional approaches. 
Experimental results illustrate that the proposed approach allows for both a significant reduction in computational time and an improved accuracy.}, number = {4}, journal = {Journal of Intelligent and Fuzzy Systems}, author = {Pham, Van Huy and Jo, Kang Hyun and Hoang, Van Dung}, year = {2019}, note = {Publisher: IOS Press}, keywords = {Action recognition, Hybrid classification, Local feature descriptor, Scalable feature extraction}, pages = {3357--3372}, }
@article{Vo2019, title = {Vietnamese herbal plant recognition using deep convolutional features}, volume = {9}, issn = {20103700}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85067069451%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.18178/ijmlc.2019.9.3.811}, abstract = {Herbal plant image identification is able to help users without specialized knowledge about botany and plant systematics to find out the information of herbal plants, thus it has become an interdisciplinary focus in both botanical taxonomy and computer vision. A computer vision aided herbal plant identification system has been developed to meet the demand of recognizing and identifying herbal plants rapidly. In this paper, the first herbal plant image dataset collected by mobile phone in natural scenes is presented, which contains 10,000 images of 10 herbal plant species in Vietnam. A VGG16-based deep learning model consisting of 5 residual building blocks is used to extract features from the images. A comparative evaluation of seven classification methods using the same deep convolutional feature extraction method is presented. Experiments on our collected dataset demonstrate that deep learning features worked well with LightGBM classification method for herbal plant recognition in the natural environment with a recognition rate of 93.6\%.}, number = {3}, journal = {International Journal of Machine Learning and Computing}, author = {Vo, Anh H. and Dang, Hoa T. and Nguyen, Bao T. and Pham, Van Huy}, month = jun, year = {2019}, note = {Publisher: International Association of Computer Science and Information Technology}, keywords = {Deep feature, Deep learning, Herbal plant, Plant identification}, pages = {363--367}, }
@article{Pham2019, title = {Prediction of 6 months smoking cessation program among women in {Korea}}, volume = {9}, issn = {20103700}, doi = {10.18178/ijmlc.2019.9.1.769}, abstract = {Cigarette smoking is the leading cause of preventable death in a general population and it seems a significant topic in health research. The primary aim of this study determines the significant risk factors and investigates the prediction of 6 months smoking cessation program among women in Korea. In this regard, we examined real-world dataset about a smoking cessation program among the only women from Chungbuk Tobacco Control Center of Chungbuk National University College of Medicine in South Korea which collected from 2015 to 2017. Accordingly, we carried out to compare four machine learning techniques: Logistic regression (LR), Support Vector Machine (SVM), Random Forest (RF) and Naïve Bayes (NB) in order to predict response for successful or unsuccessful smoking quitters. Totally we analyzed 60 set of features that may affect the association between smoking cessation such as socio-demographic characteristics, smoking status for the age of starting, duration and others by employing a filter-based feature selection method. Respectively, we identified significant 8 factors which associated with smoking cessation. The experimental results demonstrate that NB performs better than other classifiers. Moreover, the performance of prediction models as measured by Accuracy, Precision, Recall, F-measure and ROC area. 
This finding has gone some way towards enhancing our better understanding of the significant factors contributing to smoking cessation program implementation and accompanying to concern public health.}, number = {1}, journal = {International Journal of Machine Learning and Computing}, author = {Davagdorj, Khishigsuren and Yu, Seon Hwa and Kim, So Young and Van Huy, Pham and Park, Jong Hyock and Ryu, Keun Ho}, year = {2019}, keywords = {Feature selection, Logistic regression, Naïve Bayes, Random forest, Smoking cessation, Support vector machine, Women}, pages = {83--90}, }
@article{Vo2019a, title = {Deep learning for {Vietnamese} {Sign} {Language} recognition in video sequence}, volume = {9}, issn = {20103700}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85071329211%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.18178/ijmlc.2019.9.4.823}, abstract = {With most of Vietnamese hearing impaired individuals, Vietnamese Sign Language (VSL) is the only choice for communication. Thus, there are more and more study about the automatic translation of VSL to make a bridge between hearing impaired people and normal ones. However, automatic VSL recognition in video brings many challenges due to the orientation of camera, hand position and movement, inter hand relation, etc. In this paper, we present some feature extraction approaches for VSL recognition including spatial and scene-based features. Instead of relying on a static image, we specifically capture motion information between frames in a video sequence. For the recognition task, beside the traditional method of sign language recognition such as SVM, we additionally propose to use deep learning technique for VSL recognition for finding the dependence of each frame in video sequences. We collected two VSL datasets of the relative family topic (VSL-WRF) like father, mother, uncle, aunt.... The first one includes 12 words in Vietnamese language which only have a little change between frames. While the second one contains 15 with gestures involving the relative position of the body parts and orientation of the motion. Moreover, the data augmentation technique is proposed to gain more information of hand movement and hand position. The experiments achieved the satisfactory results with accuracy of 88.5\% (traditional SVM) and 95.83\% (deep learning). 
It indicates that deep learning combining with data augmentation technique provides more information about the orientation or movement of hand, and it would be able to improve the performance of VSL recognition system.}, number = {4}, journal = {International Journal of Machine Learning and Computing}, author = {Vo, Anh H. and Pham, Van Huy and Nguyen, Bao T.}, month = aug, year = {2019}, note = {Publisher: International Association of Computer Science and Information Technology}, keywords = {Deep learning, Local descriptors, Motion-based feature, Scene-based feature, Spatial feature, VSL recognition, Vietnamese sign language (VSL)}, pages = {440--445}, }
@article{Pham2019, title = {Personal identification based on deep learning technique using facial images for intelligent surveillance systems}, volume = {9}, issn = {20103700}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85071334907%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.18178/ijmlc.2019.9.4.827}, abstract = {Identity recognition is a very important task in intelligent surveillance systems. Today, identity recognition systems have achieved high accuracy and widely used in specific application areas such as recognition system based on retina imaging in immigration inspection, civil security and citizen management. In these systems, human is required to be submissive for data acquisition to identify themselves. However, the automated monitoring systems are required to be active for information retrieval and human is passively monitored in this situation. In this kind of approach, human recognition is still a challenging task for the overall system performance. This study proposes a solution for human identification based on the human face recognition in images extracted from conventional cameras at a low resolution and quality. Our proposed approach for human identification is based on a deep learning method for feature extraction and classification for human identification using a similarity estimation. This approach was evaluated on some standard databases which are available online and also on our own collected dataset. The results from the comparison to the state of the art approach illustrate that our proposed approach achieves high accuracy and is suitable for practical applications.}, number = {4}, journal = {International Journal of Machine Learning and Computing}, author = {Pham, Van Huy and Tran, Diem Phuc and Hoang, Van Dung}, month = aug, year = {2019}, note = {Publisher: International Association of Computer Science and Information Technology}, keywords = {Face image, Feature extraction, Personal identification}, pages = {465--470}, }
@article{XuanHuynh2019, title = {Distributed {Framework} for {Automating} {Opinion} {Discretization} from {Text} {Corpora} on {Facebook}}, volume = {7}, issn = {21693536}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85068208355%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.1109/ACCESS.2019.2922427}, abstract = {Nowadays, the consecutive increase of the volume of text corpora datasets and the countless research directions in general classification have created a great opportunity and an unprecedented demand for a comprehensive evaluation of the current achievement in the research of natural language processing. There are unfortunately few studies that have applied the combination of convolutional neural networks (CNN) and Apache Spark to the task of automating opinion discretization. In this paper, the authors propose a new distributed structure for solving an opinion classification problem in text mining by utilizing CNN models and big data technologies on Vietnamese text sources. The proposed framework consists of implementation concepts that are needed by a researcher to perform experiments on text discretization problems. It covers all the steps and components that are usually part of a completely practical text mining pipeline: Acquiring input data, processing, tokenizing it into a vectorial representation, applying machine learning algorithms, performing the trained models to unseen data, and evaluating their accuracy. The development of the framework started with a specific focus on binary text discretization, but soon expanded toward many other text-categorization-based problems, distributed language modeling and quantification. Several intensive assessments have been investigated to prove the robustness and efficiency of the proposed framework. 
Resulting in high accuracy (72.99\% ± 3.64) from the experiments, one can conclude that it is feasible to perform our proposed distributed framework to the task of opinion discretization on Facebook.}, journal = {IEEE Access}, author = {Xuan Huynh, Hiep and Nguyen, Vu Tuan and Duong-Trung, Nghia and Pham, Van Huy and Phan, Cang Thuong}, year = {2019}, note = {Publisher: Institute of Electrical and Electronics Engineers Inc.}, keywords = {Apache spark, TensorFlow, classification, convolutional neural networks, deep learning, opinion mining}, pages = {78675--78684}, }
@article{Kim2019, title = {Simplified neural network model design with sensitivity analysis and electricity consumption prediction in a commercial building}, volume = {12}, issn = {19961073}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85065539954%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.3390/en12071201}, abstract = {With growing urbanization, it has become necessary to manage this growth smartly. Specifically, increased electrical energy consumption has become a rapid urbanization trend in China. A building model based on a neural network was proposed to overcome the difficulties of analytical modelling. However, increased amounts of data, repetitive computation, and training time become a limitation of this approach. A simplified model can be used instead of the full order model if the performance is acceptable. In order to select effective data, Mean Impact Value (MIV) has been applied to select meaningful data. To verify this neural network method, we used real electricity consumption data of a shopping mall in China as a case study. In this paper, a Bayesian Regularization Neural Network (BRNN) is utilized to avoid overfitting due to the small amount of data. With the simplified data set, the building model showed reasonable performance. The mean of Root Mean Square Error achieved is around 10\% with respect to the actual consumption and the standard deviation is low, which reflects the model’s reliability. We also compare the results with our previous approach using the Levenberg–Marquardt back propagation (LM-BP) method. The main difference is the output reliability of the two methods. LM-BP shows higher error than BRNN due to overfitting. 
BRNN shows reliable prediction results when the simplified neural network model is applied.}, number = {7}, journal = {Energies}, author = {Kim, Moon Keun and Cha, Jaehoon and Lee, Eunmi and Pham, Van Huy and Lee, Sanghyuk and Theera-Umpon, Nipon}, month = mar, year = {2019}, note = {Publisher: MDPI AG}, keywords = {Bayesian regularization neural network, Building modelling, Energy management, Mean impact value, Simplified model}, }
@article{DBLP:journals/disopt/PhamNL19, title = {A linear time algorithm for balance vertices on trees}, volume = {32}, issn = {15725286}, url = {https://doi.org/10.1016/j.disopt.2018.11.001}, doi = {10.1016/j.disopt.2018.11.001}, abstract = {The concept of balance vertices was first investigated by Reid (1999). For the main result “the balance vertices of a tree consist of a single vertex or two adjacent vertices”, Shan and Kang (2004) and Reid and DePalma (2005) improved the length and technique of the proof. In this paper we further discuss the balance vertices on trees in a generalization context. We do not only provide a simple efficient proof for the relevant result but also develop a linear time algorithm to find the set of balance vertices on the underlying tree.}, journal = {Discrete Optimization}, author = {Pham, Van Huy and Nguyen, Kien Trung and Le, Tran Thu}, year = {2019}, keywords = {Balance vertices, Complexity, Tree}, pages = {37--42}, }
@article{DBLP:journals/tcs/PhamN19, title = {Inverse 1-median problem on trees under mixed rectilinear and {Chebyshev} norms}, volume = {795}, issn = {03043975}, url = {https://doi.org/10.1016/j.tcs.2019.05.039}, doi = {10.1016/j.tcs.2019.05.039}, abstract = {We consider in this paper the inverse 1-median problem on trees with variable vertex weights, which are partitioned into groups. While the modifications in each group are measured by Chebyshev norm, rectilinear norm is applied for computing the cost of groups; and vice versa. As a result, it yields the sum of max and the max of sum objective functions. For the sum of max objective, we develop an O(MlogM) time algorithm based on a parameterization approach, where M is the number of vertices in the tree. On the other hand, the problem under the max of sum objective can be solved in linear time by an algorithm that prunes a half of indices in each iteration.}, journal = {Theoretical Computer Science}, author = {Pham, Van Huy and Nguyen, Kien Trung}, year = {2019}, keywords = {Chebyshev, Inverse optimization, Knapsack, Median problem, Trees}, pages = {119--127}, }
@article{Lee2019, title = {Analysis of public complaints to identify priority policy areas: {Evidence} from a {Satellite} {City} around {Seoul}}, volume = {11}, issn = {20711050}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85074809915%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.3390/su11216140}, abstract = {Conventional studies on policy demand identification that are anchored in big data on urban residents are limited in that they mostly involve the top-down and government-oriented use of such data. It restricts treatment to specific issues (e.g., public safety and disaster management), even from the beginning of data collection. Scant research has emphasized the general use of data on civil complaints-which are independent of areas of application-in the examination of sustainable cities. In this work, we hypothesized that the analyses of civil complaint data and big data effectively identify what urban residents want from local governments with respect to a broad range of issues. We investigated policy demand using big data analytics in examining unstructured civil complaint data on safety and disaster management. We extracted major keywords associated with safety and disaster management via text mining to inquire into the relevant matters raised in the civil complaints. We also conducted a panel analysis to explore the effects exerted by the characteristics of 16 locally governed towns on residents' policy demands regarding safety and disaster management-related complaints. The results suggest that policy needs vary according to local sociocultural characteristics such as the age, gender, and economic status of residents as well as the proportion of migrants in these localities, so that, city governments need to provide customized services. This research contributes to extend with more advanced big data analysis techniques such as text mining, and data fusion and integration. 
The technique allows the government to identify more specifically citizens' policy needs.}, number = {21}, journal = {Sustainability (Switzerland)}, author = {Lee, Eunmi and Lee, Sanghyuk and Kim, Kyeong Soo and Pham, Van Huy and Sul, Jinbae}, month = nov, year = {2019}, note = {Publisher: MDPI AG}, keywords = {Big data, Civil complaints, Panel analysis, Policy demand, Safety and crisis management, Sustainable urban, Text mining}, }
@article{Lee2019a, title = {Neural-network-based building energy consumption prediction with training data generation}, volume = {7}, issn = {22279717}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85074231608%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.3390/pr7100731}, abstract = {The importance of neural network (NN) modelling is evident from its performance benefits in a myriad of applications, where, unlike conventional techniques, NN modeling provides superior performance without relying on complex filtering and/or time-consuming parameter tuning specific to applications and their wider ranges of conditions. In this paper, we employ NN modelling with training data generation based on sensitivity analysis for the prediction of building energy consumption to improve performance and reliability. Unlike our previous work, where insignificant input variables are successively screened out based on their mean impact values (MIVs) during the training process, we use the receiver operating characteristic (ROC) plot to generate reliable data with a conservative or progressive point of view, which overcomes the issue of data insufficiency of the MIV method: By properly setting boundaries for input variables based on the ROC plot and their statistics, instead of completely screening them out as in the MIV-based method, we can generate new training data that maximize true positive and false negative numbers from the partial data set. Then a NN model is constructed and trained with the generated training data using Levenberg-Marquardt back propagation (LM-BP) to perform electricity prediction for commercial buildings. The performance of the proposed data generation methods is compared with that of the MIV method through experiments, whose results show that data generation using successive and cross pattern provides satisfactory performance, following energy consumption trends with good phase. 
Among the two options in data generation, i.e., successive and two data combination, the successive option shows lower root mean square error (RMSE) than the combination one by around 400{\textasciitilde}900 kWh (i.e., 30\%{\textasciitilde}75\%).}, number = {10}, journal = {Processes}, author = {Lee, Sanghyuk and Cha, Jaehoon and Kim, Moon Keun and Kim, Kyeong Soo and Pham, Van Huy and Leach, Mark}, year = {2019}, note = {Publisher: MDPI AG}, keywords = {Building modelling, Energy management, Mean impact value (MIV), Neural network (NN), Receiver operating characteristic (ROC)}, }
@article{doi:10.1080/02331934.2019.1571056, title = {On some inverse 1-center location problems}, volume = {68}, issn = {10294945}, url = {https://doi.org/10.1080/02331934.2019.1571056}, doi = {10.1080/02331934.2019.1571056}, abstract = {This paper addresses two problems, the inverse 1-center problem on the line with closed-interval facilities and the inverse 1-center problem on Rd. For the first problem, we develop a combinatorial O(n log n) algorithm based on the convexity of the objective function, where n is the number of facilities. We also discuss the corresponding problem on interval graphs with the similar solution approach. Concerning the inverse 1-center problem on Rd, we propose an O(dn2 log n) algorithm based on the optimality criterion, where n is the number of existing points.}, number = {5}, journal = {Optimization}, author = {Nguyen, Kien Trung and Hung, Nguyen Thanh and Nguyen-Thu, Huong and Le, Tran Thu and Pham, Van Huy}, year = {2019}, note = {Publisher: Taylor \& Francis}, keywords = {1-center, Location problem, interval graph, inverse optimization}, pages = {999--1015}, }
@article{Van_Huy_Pham_70119977, title = {A combinatorial algorithm for the ordered 1-median problem on cactus graphs}, volume = {56}, issn = {09750320}, url = {http://doi.org/10.1007/s12597-019-00402-2}, doi = {10.1007/s12597-019-00402-2}, abstract = {A cactus graph is a graph in which any two simple cycles have at most one vertex in common. In this paper we address the ordered 1-median location problem on cactus graphs, a generalization of some popular location models such as 1-median, 1-center, and 1-centdian problems. For the case with non-decreasing multipliers, we show that there exists a cycle or an edge that contains an ordered 1-median. Based on this property, we develop a combinatorial algorithm that finds an ordered 1-median on a cactus in O(n2log n) time, where n is the number of vertices in the underlying cactus.}, number = {3}, journal = {Opsearch}, author = {Pham, Van Huy and Tam, Nguyen Chi}, year = {2019}, keywords = {Cactus, Convex, Location problem, Ordered 1-median}, pages = {780--789}, }
@article{DBLP:journals/jucs/Theera-UmponHBL18, title = {Verifying secure authentication protocol for communication between {IoT}-based medical devices}, volume = {24}, issn = {09486968}, url = {http://www.jucs.org/jucs%5C_24%5C_9/verifying%5C_secure%5C_authentication%5C_protocol}, doi = {https://doi.org/10.3217/jucs-024-09-1258}, abstract = {The evolving Internet of Things (IoT) technology has driven the advancement of communication technology for implantable devices and relevant services. Still, concerns are raised over implantable medical devices (IMDs), because the wireless transmission section between patients and devices is liable to intrusions on privacy attributable to hacking attacks and resultant leakage of patients’ personal information. Also, manipulating and altering patients’ medical information may lead to serious leakage of personal information and thus adverse medical incidents. To address the foregoing challenges, the present paper proposes a security protocol that copes with a range of vulnerabilities in communication between IMDs and other devices. In addition, the proposed protocol encrypts the communication process and data to eliminate the likelihood of personal information being leaked. The verification highlights the safety and security of the proposed protocol in wireless communication.}, number = {9}, journal = {Journal of Universal Computer Science}, author = {Theera-Umpon, Nipon and Han, Kun Hee and Bae, Woo Sik and Lee, Sanghyuk and Pham, Van Huy}, year = {2018}, keywords = {Authentication protocol, Casper, Integrated authentication protocol, Model checking, Security policy, U-Healthcare Service}, pages = {1258--1270}, }
@incollection{Hoang_2018, title = {Improving {Traffic} {Signs} {Recognition} {Based} {Region} {Proposal} and {Deep} {Neural} {Networks}}, volume = {10752 LNAI}, isbn = {978-3-319-75419-2}, url = {http://dx.doi.org/10.1007/978-3-319-75420-8_57}, abstract = {Nowadays, traffic sign recognition has played an important task in autonomous vehicle, intelligent transportation systems. However, it is still a challenging task due to the problems of a variety of color, shape, environmental conditions. In this paper, we propose a new approach for improving accuracy of traffic sign recognition. The contribution of this work is three-fold: First, region proposal based on segmentation technique is applied to cluster traffic signs into several sub regions depending upon the supplemental signs and the main sign color. Second, image augmentation of training dataset generates a larger data for deep neural network learning. This proposed task is aimed to address the small data problem. It is utilized for enhancing capabilities of deep learning. Finally, we design appropriately a deep neural network to image dataset, which combines the original images and proposal images. The proposed approach was evaluated on a benchmark dataset. Experimental evaluation on public benchmark dataset shows that the proposed approach enhances performance to 99.99\% accuracy. Comparison results illustrated that our proposed method reaches higher performance than almost state-of-the-art methods.}, booktitle = {Lecture {Notes} in {Computer} {Science} (including subseries {Lecture} {Notes} in {Artificial} {Intelligence} and {Lecture} {Notes} in {Bioinformatics})}, publisher = {Springer International Publishing}, author = {Hoang, Van Dung and Le, My Ha and Tran, Truc Thanh and Pham, Van Huy}, year = {2018}, doi = {10.1007/978-3-319-75420-8_57}, note = {ISSN: 16113349}, keywords = {Data augmentation, Deep neural networks, Region proposal, Traffic sign recognition}, pages = {604--613}, }
@inproceedings{Le2018, title = {Encoded {Communication} {Based} on {Sonar} and {Ultrasonic} {Sensor} in {Motion} {Planning}}, volume = {2018-Octob}, isbn = {978-1-5386-4707-3}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85060879248%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.1109/ICSENS.2018.8589706}, abstract = {In this paper, we will be focusing on the application of sensor communication by implementing Bug algorithms into motion planning and search-based planning. We have learned and researched about the bug algorithm family and then applied the algorithm into programming a robot. For this project, we will be programming a RP6 Robot kit with one of the three bug variation: bug 1, bug 2 and tangent bug so that it can move from a known starting point to a known end point while navigating an unknown obstacle course between the two points. This project is an introduction to the field of motion planning in robotics, which provides the basics for building a fully-automated robot with environmental awareness. Not included in this project is discussion of other motion planning algorithm family, this paper focuses solely on the bug algorithm and its programming application.}, booktitle = {Proceedings of {IEEE} {Sensors}}, publisher = {Institute of Electrical and Electronics Engineers Inc.}, author = {Le, Than D. and Bui, Duy T. and Pham, Vanhuy}, month = dec, year = {2018}, note = {ISSN: 21689229}, keywords = {bug algorithms, motion planning, obstacle avoidance, swarm mobile robotics}, }
@inproceedings{DBLP:conf/aciids/PhamDN18, title = {{CNN}-{Based} {Character} {Recognition} for {License} {Plate} {Recognition} {System}}, volume = {10752 LNAI}, isbn = {978-3-319-75419-2}, url = {https://doi.org/10.1007/978-3-319-75420-8%5C_56}, doi = {10.1007/978-3-319-75420-8_56}, abstract = {License Plate Recognition is a practical use of computer vision based application. With the increase in demand of automation transportation systems, this application plays a very big role in the system development. Also, the use of vehicles has been increasing because of population growth and human needs in recent years makes the application is more challenging. Moreover, license plates are available in diverse colors and style and that the presence of noise, blurring in the image, uneven illumination, and occlusion makes the task even more difficult for conventional recognition methods. We propose an approach of using a Convolutional Neural Networks (CNN) classifier for the recognition. Pre-processing techniques are firstly applied on input images, such as filtering, thresholding, and then segmentation. Then, we train a CNN classifier for character recognition. Although the performance of a CNN is very impressive, it costs much time to complete the character recognition step. In this study, a modified CNN is proposed to help the system run in real-time. 
Experimental results have been obtained and analyzed in comparison with other methods.}, booktitle = {Lecture {Notes} in {Computer} {Science} (including subseries {Lecture} {Notes} in {Artificial} {Intelligence} and {Lecture} {Notes} in {Bioinformatics})}, publisher = {Springer}, author = {Pham, Van Huy and Dinh, Phong Quang and Nguyen, Van Huan}, editor = {Nguyen, Ngoc Thanh and Hoang, Duong Hung and Hong, Tzung-Pei and Pham, Hoang and Trawinski, Bogdan}, year = {2018}, note = {Series Title: Lecture Notes in Computer Science ISSN: 16113349}, keywords = {Character recognition, Convolution Neural Network, License Plate Recognition System}, pages = {594--603}, }
@article{Chu2018, title = {An update on physical health and economic consequences of overweight and obesity}, volume = {12}, issn = {18780334}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85048896674%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.1016/j.dsx.2018.05.004}, abstract = {Overweight and obesity (OW and OB) have been on the increase globally and posed health risks to the world's population of all ages, including pre-born babies, children, adolescents, adults and elderly people, via their comorbid conditions. Excellent examples of comorbidities associated with obesity include cancer, cardiovascular diseases (CVD) and type 2 diabetes mellitus (T2DM). In this article, we aimed to review and update scientific evidence regarding the relationships between obesity and its common physical health consequences, including CVD, T2DM, hypertension, ischemic stroke, cancer, dyslipidemia and reproductive disorders. In addition, the economic burden of OW and OB will be discussed. Abundant evidence is found to support the associations between obesity and other diseases. In general, the odd ratios, risk ratios or hazard ratios are often higher in OW and OB people than in the normal-weight ones. However, the molecular mechanism of how OW and OB induce the development of other diseases has not been fully understood. Figures also showed that obesity and its-related disorders exert enormous pressure on the economy which is projected to increase. 
This review highlights the fact that obesity can lead to numerous lethal health problems; therefore, it requires a lot of economic resources to fight against this epidemic.}, number = {6}, journal = {Diabetes and Metabolic Syndrome: Clinical Research and Reviews}, author = {Chu, Dinh Toi and Minh Nguyet, Nguyen Thi and Dinh, Thien Chu and Thai Lien, Nguyen Vu and Nguyen, Khanh Hoang and Nhu Ngoc, Vo Truong and Tao, Yang and Son, Le Hoang and Le, Duc Hau and Nga, Vu Bich and Jurgoński, Adam and Tran, Quoc Hung and Van Tu, Pham and Pham, Van Huy}, month = nov, year = {2018}, pmid = {29799416}, note = {Publisher: Elsevier Ltd}, keywords = {Economic burden, Metabolic disorder, Obesity, Overweight, Physical health consequences}, pages = {1095--1100}, }
@incollection{Van_Huy_Pham_70119992, title = {A two-stage detection approach for car counting in day and nighttime}, volume = {672}, isbn = {978-981-10-7511-7}, url = {http://doi.org/10.1007/978-981-10-7512-4%5C_16}, abstract = {We developed a car counting system using car detection methods for both daytime and nighttime traffic scenes. The detection methods comprise two stages: car hypothesis generation and hypothesis verification. For daytime traffic scenes, we proposed a new car hypothesis generation by rapidly locating car windshield regions, which are used to estimate car positions in occlusion situations. For car hypothesis at nighttime, we proposed an approach using k-means clustering-based segmentation to find headlight candidates to facilitate the later pairing process. Counting decision is made from Kalman filter-based tracking, followed by rule-based verification. The results evaluated on real-world traffic videos show that our system can work well in different conditions of lighting and occlusion.}, booktitle = {Advances in {Intelligent} {Systems} and {Computing}}, author = {Pham, Van Huy and Le, Duc Hau}, year = {2018}, doi = {10.1007/978-981-10-7512-4_16}, note = {ISSN: 21945357}, pages = {159--171}, }
@article{Le2018a, title = {Drug {Response} {Prediction} by {Globally} {Capturing} {Drug} and {Cell} {Line} {Information} in a {Heterogeneous} {Network}}, volume = {430}, issn = {10898638}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85049755850%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.1016/j.jmb.2018.06.041}, abstract = {One of the most important problem in personalized medicine research is to precisely predict the drug response for each patient. Due to relationships between drugs, recent machine learning-based methods have solved this problem using multi-task learning models. However, chemical relationships between drugs have not been considered. In addition, using very high dimensions of -omics data (e.g., genetic variant and gene expression) also limits the prediction power. A recent dual-layer network-based method was proposed to overcome these limitations by embedding gene expression features into a cell line similarity network and drug relationships in a chemical structure-based drug similarity network. However, this method only considered neighbors of a query drug and a cell line. Previous studies also reported that genetic variants are less informative to predict an outcome than gene expression. Here, we develop a novel network-based method, named GloNetDRP, to overcome these limitations. Besides gene expression, we used the genetic variant to build another cell line similarity network. First, we constructed a heterogeneous network of drugs and cell lines by connecting a drug similarity network and a cell line similarity network by known drug–cell line responses. Then, we proposed a method to predict the responses by exploiting not only the neighbors but also other drugs and cell lines in the heterogeneous network. Experimental results on two large-scale cell line data sets show that prediction performance of GloNetDRP on gene expression and genetic variant data is comparable. 
In addition, GloNetDRP outperformed dual-layer network- and typical multi-task learning-based methods.}, number = {18}, journal = {Journal of Molecular Biology}, author = {Le, Duc Hau and Pham, Van Huy}, month = sep, year = {2018}, pmid = {29966608}, note = {Publisher: Academic Press}, keywords = {drug similarity network, gene expression-based cell line similarity network, genetic variant-based cell line similarity network, global drug response prediction, heterogeneous network of drugs and cell lines}, pages = {2993--3004}, }
@inproceedings{Le2017, title = {An ensemble learning-based method for prediction of novel disease-microrna associations}, volume = {2017-Janua}, isbn = {978-1-5386-3576-6}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85043686097%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.1109/KSE.2017.8119426}, abstract = {Many studies have shown the associations of microRNAs on human diseases. A number of computational methods have been proposed to predict such associations by ranking candidate microRNAs ac-cording to their relevance to a disease. Among them, network-based methods are usually based on microRNA functional similarity networks which are constructed based on microRNA-target interactions. Therefore, the prediction performances of these methods are highly dependent on the quality of such interactions which are usually predicted by computational methods. Meanwhile, machine learning-based methods usually formulate the disease miRNA prediction as a classification problem, where novel associations between disease and miRNA are predicted based on known disease-miRNA associations. However, those methods are mainly based on single binary classifiers; therefore, they have a limitation in prediction performance. In this study, we proposed a new method, namely RFMDA, to predict disease-associated miRNAs. Our method based on Random Forest (RF), an ensemble technique, where the final classifier is constructed by multitude of decision trees, to perform the prediction. In order to compare with other previous methods, we use the same procedure to build training samples, where positive training samples are known disease-miRNA associations. In addition, features of each sample measure either functional or phenotypical similarities between miRNAs or phenotypes, respectively. 
Simulation results showed that RFMDA outperformed previous learning-based methods including two binary classifiers (i.e., Naïve Bayes and two-class Support Vector Machines) and one semi-supervised classifier (i.e., Regularized Least Square). Moreover, using the trained model, we can predict novel miRNAs associated to some diseases such as breast cancer, colorectal cancer and hepatocellular carcinoma.}, booktitle = {Proceedings - 2017 9th {International} {Conference} on {Knowledge} and {Systems} {Engineering}, {KSE} 2017}, publisher = {Institute of Electrical and Electronics Engineers Inc.}, author = {Le, Duc Hau and Pham, Van Huy and Nguyen, Thuy Thi}, month = nov, year = {2017}, keywords = {Binary classifier, Ensemble learning, Prediction of disease-miRNA association, Semi-supervised classifier}, pages = {7--12}, }
@inproceedings{Pham2017, title = {Boosting discriminative models for activity detection using local feature descriptors}, volume = {10191 LNAI}, isbn = {978-3-319-54471-7}, doi = {10.1007/978-3-319-54472-4_57}, abstract = {This paper presents a method for daily living activity prediction based on boosting discriminative models. The system consists of several steps. First, local feature descriptors are extracted from multiple scales of the sequent images. In this experiment, the basic feature descriptors based on HOG, HOF, MBH are considered to process. Second, local features based BoW descriptors are studied to construct feature vectors, which are then fed to classification machine. The BoW feature extraction is a pre-processing step, which is utilized to avoid strong correlation data, and to distinguish feature properties for uniform data for classification machine. Third, a discriminative model is constructed using the BoW features, which is based on the individual local descriptor. Sequentially, final decision of action classes is done by the classifier using boosting discriminative models. Different to previous contributions, the sequent-overlap frames are considered to convolute and infer action classes instead of an individual set of frames is used for prediction. An advantage of boosting is that it supports to construct a strong classifier based on a set of weak classifiers associated with appropriate weights to obtain results in high performance. The method is successfully tested on some standard databases.}, booktitle = {Lecture {Notes} in {Computer} {Science} (including subseries {Lecture} {Notes} in {Artificial} {Intelligence} and {Lecture} {Notes} in {Bioinformatics})}, author = {Pham, Van Huy and Le, My Ha and Van-Dung, Hoang}, year = {2017}, note = {ISSN: 16113349}, keywords = {Action recognition, Boosting discriminative models, Histograms of oriented gradients, Motion boundary, Optical flow}, pages = {609--618}, }
@article{Van_Huy_Pham_70120219, title = {Design and evaluation of features and classifiers for oled panel defect recognition in machine vision}, volume = {1}, issn = {24751847}, url = {http://doi.org/10.1080/24751839.2017.1355717}, doi = {10.1080/24751839.2017.1355717}, abstract = {With the rapid growth of organic light-emitting diode (OLED) display devices, the industrial manufacturing of OLED panels is currently an expanding global reality. Regarding quality control, automatic defect detection and classification are undoubtedly indispensable. Although defect detection systems have been widely considered in the literature, classification systems have not received appropriate attention. This study proposes the design of an efficient and high-performance system for defect classification by combining well-known machine-learning algorithms: support vector machine, random forest (RF), and k-nearest neighbours. To begin, possible features are designed and feature selection using principal component analysis and RF is investigated to automatically select the most effective features. Then, a hierarchical structure of classifiers is proposed for efficiently adjusting the rates of true defect and fake defect classification. The proposed system is evaluated over a database of 3502 images captured from real OLED display devices in different illumination conditions. The defects in the database are divided into 10 classes corresponding to the types of true defect and fake defect. 
The experiments confirm that the proposed system can achieve an accuracy of up to 94.0\% for the binary classification of true defect and fake defect and an overall recognition rate of 86.3\% for the 10 sub-classes.}, number = {4}, journal = {Journal of Information and Telecommunication}, author = {Nguyen, Van Huan and Pham, Van Huy and Cui, Xuenan and Ma, Mingjie and Kim, Hakil}, year = {2017}, keywords = {Feature extraction, Feature selection, KNN, OLED defects, Random forests, SVM}, pages = {334--350}, }
@article{Le2017b, title = {{HGPEC}: {A} {Cytoscape} app for prediction of novel disease-gene and disease-disease associations and evidence collection based on a random walk on heterogeneous network}, volume = {11}, issn = {17520509}, doi = {10.1186/s12918-017-0437-x}, abstract = {Background: Finding gene-disease and disease-disease associations play important roles in the biomedical area and many prioritization methods have been proposed for this goal. Among them, approaches based on a heterogeneous network of genes and diseases are considered state-of-the-art ones, which achieve high prediction performance and can be used for diseases with/without known molecular basis. Results: Here, we developed a Cytoscape app, namely HGPEC, based on a random walk with restart algorithm on a heterogeneous network of genes and diseases. This app can prioritize candidate genes and diseases by employing a heterogeneous network consisting of a network of genes/proteins and a phenotypic disease similarity network. Based on the rankings, novel disease-gene and disease-disease associations can be identified. These associations can be supported with network- and rank-based visualization as well as evidences and annotations from biomedical data. A case study on prediction of novel breast cancer-associated genes and diseases shows the abilities of HGPEC. In addition, we showed prominence in the performance of HGPEC compared to other tools for prioritization of candidate disease genes. 
Conclusions: Taken together, our app is expected to effectively predict novel disease-gene and disease-disease associations and support network- and rank-based visualization as well as biomedical evidences for such the associations.}, number = {1}, journal = {BMC Systems Biology}, author = {Le, Duc Hau and Pham, Van Huy}, year = {2017}, pmid = {28619054}, note = {Publisher: BioMed Central}, keywords = {Cytoscape app, Disease prioritization, Disease-disease association, Disease-gene association, Gene prioritization, Heterogeneous network, Random walk with restart algorithm}, pages = {61}, }
@article{Le2017a, title = {Random walks on mutual {microRNA}-target gene interaction network improve the prediction of disease-associated {microRNAs}}, volume = {18}, issn = {14712105}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-85033790559%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.1186/s12859-017-1924-1}, abstract = {Background: MicroRNAs (miRNAs) have been shown to play an important role in pathological initiation, progression and maintenance. Because identification in the laboratory of disease-related miRNAs is not straightforward, numerous network-based methods have been developed to predict novel miRNAs in silico. Homogeneous networks (in which every node is a miRNA) based on the targets shared between miRNAs have been widely used to predict their role in disease phenotypes. Although such homogeneous networks can predict potential disease-associated miRNAs, they do not consider the roles of the target genes of the miRNAs. Here, we introduce a novel method based on a heterogeneous network that not only considers miRNAs but also the corresponding target genes in the network model. Results: Instead of constructing homogeneous miRNA networks, we built heterogeneous miRNA networks consisting of both miRNAs and their target genes, using databases of known miRNA-target gene interactions. In addition, as recent studies demonstrated reciprocal regulatory relations between miRNAs and their target genes, we considered these heterogeneous miRNA networks to be undirected, assuming mutual miRNA-target interactions. Next, we introduced a novel method (RWRMTN) operating on these mutual heterogeneous miRNA networks to rank candidate disease-related miRNAs using a random walk with restart (RWR) based algorithm. Using both known disease-associated miRNAs and their target genes as seed nodes, the method can identify additional miRNAs involved in the disease phenotype. 
Experiments indicated that RWRMTN outperformed two existing state-of-the-art methods: RWRMDA, a network-based method that also uses a RWR on homogeneous (rather than heterogeneous) miRNA networks, and RLSMDA, a machine learning-based method. Interestingly, we could relate this performance gain to the emergence of "disease modules" in the heterogeneous miRNA networks used as input for the algorithm. Moreover, we could demonstrate that RWRMTN is stable, performing well when using both experimentally validated and predicted miRNA-target gene interaction data for network construction. Finally, using RWRMTN, we identified 76 novel miRNAs associated with 23 disease phenotypes which were present in a recent database of known disease-miRNA associations. Conclusions: Summarizing, using random walks on mutual miRNA-target networks improves the prediction of novel disease-associated miRNAs because of the existence of "disease modules" in these networks.}, number = {1}, journal = {BMC Bioinformatics}, author = {Le, Duc Hau and Verbeke, Lieven and Son, Le Hoang and Chu, Dinh Toi and Pham, Van Huy}, month = nov, year = {2017}, pmid = {29137601}, note = {Publisher: BioMed Central Ltd.}, keywords = {Disease-associated microRNAs, MicroRNA targets, Network analysis, Random walk with restart}, }
@article{Pham2016, title = {A two-stage approach for front-view vehicle detection and counting at nighttime}, volume = {54}, issn = {2261236X}, url = {http://www.matec-conferences.org/10.1051/matecconf/20165408001}, doi = {10.1051/matecconf/20165408001}, abstract = {In this paper, we introduce an approach to car detection and counting using a two-stage method of car detection and counting. For car hypothesis, we propose a method of headlight candidate extraction using k-means clustering based segmentation which is used as a multi-thresholding method to segment the gray-image of the traffic scene with the lowest level and highest level of intensities included in seed configuration. In verification stage, both individual and paired headlights are tracked during their existence in the ROI. Kalman filter is used to track the detected headlights and counting decision is given when the pairs of headlights follow specified counting rules. The experiments are evaluated on real world traffic videos at the resolution of 640x480, 15fps. The method is robust with various situations of illumination at nighttime.}, journal = {MATEC Web of Conferences}, author = {Pham, Van Huy and Le, Duc Hau}, editor = {Abou-El-Hossein, K.}, month = apr, year = {2016}, pages = {08001}, }
@article{Van_Huy_Pham_70120028, title = {Front-view car detection and counting with occlusion in dense traffic flow}, volume = {13}, issn = {20054092}, url = {http://doi.org/10.1007/s12555-014-0229-7}, doi = {10.1007/s12555-014-0229-7}, abstract = {In dense traffic flow, car occlusion is usually one of the great challenges of vehicle detection and tracking in traffic monitoring systems. Current methods of car hypothesis such as symmetry or shadow based method work only with non-occluded cars. In this paper, we proposed an approach to car detection and counting using a new method of car hypothesis based on car windshield appearance which is the most feasible cue to hypothesize cars in occlusion situations. In hypothesis stage, Hough transformation is used to detect trapezoid-like regions where a car’s windshield could be located, and then candidate car regions are estimated by the windshield region and its size. In verification stage, HOG descriptor and a well-collected dataset are used to train a linear SVM classifier for detecting cars at a high accuracy rate. Then, a tracking process based on Kalman filter is used to track the movement of detected cars in consecutive frames of traffic videos, followed by rule-based reasoning for counting decision. Experimental results on real traffic videos showed that the system is able to detect, track and count multiple cars including occlusion in dense traffic flow in real-time.}, number = {5}, journal = {International Journal of Control, Automation and Systems}, author = {Van Pham, Huy and Lee, Byung Ryong}, year = {2015}, keywords = {Car counting, HOG, Hough transform, tracking, traffic monitoring, vehicle detection}, pages = {1150--1160}, }
@article{Van_Huy_Pham_70119896, title = {An image segmentation approach for fruit defect detection using k-means clustering and graph-based algorithm}, volume = {2}, issn = {2196-8888}, url = {http://doi.org/10.1007/s40595-014-0028-3}, doi = {10.1007/s40595-014-0028-3}, abstract = {Machine vision has been introduced in variety of industrial applications for fruit processing, allowing the automation of tasks performed so far by human operators. Such an important task is the detection of defects present on fruit peel which helps to grade or to classify fruit quality. Image segmentation is usually the first step in detecting flaws in fruits and its result mainly affects the accuracy of the system. A diversity of methods of automatic segmentation for fruit images has been developed. In this paper, a hybrid algorithm, which is based on split and merge approach, is proposed for an image segmentation that can be used in fruit defect detection. The algorithm firstly uses k-means algorithm to split the original image into regions based on Euclidean color distance in \$\$L{\textasciicircum}*a{\textasciicircum}*b{\textasciicircum}*\$\$ L * a * b * space to produce an over-segmentation result. Then, based on a graph representation, a merge procedure using minimum spanning tree is then taken into account to iteratively merge similar regions into new homogenous ones. This combination is an efficient approach to employ the local and global characteristic of intensities in the image. The experiment showed good results in the terms of human observation and in processing time.}, number = {1}, journal = {Vietnam Journal of Computer Science}, author = {Pham, Van Huy and Lee, Byung Ryong}, year = {2015}, pages = {25--33}, }
@article{DBLP:journals/ijprai/TruongPL13, title = {New vehicle detection algorithm using symmetry search and {GA}-based {SVM}}, volume = {27}, issn = {02180014}, url = {https://doi.org/10.1142/S0218001413550033}, doi = {10.1142/S0218001413550033}, abstract = {In this paper, we present a two-stage vision-based approach to detect front and rear vehicle views in road scene images. The first stage is hypothesis generation (HG), in which potential vehicles are hypothesized. During the HG step, we use a vertical, horizontal edge map, and different colors between road background and the lower part of vehicle to determine the bottom position of the vehicle. Next, we apply vertical symmetry axis detection into contour edge images to build the potential regions where vehicles may be presented. The second stage is hypothesis verification (HV). In this stage, all hypotheses are verified by Decision Tree (DT) training combined with a modified Genetic Algorithm (GA) to find the best features subset based on Haar-like feature extraction and an appropriate parameters set of Support Vector Machine for classification, which is robust for front and rear views of vehicle detection and recognition problems. © 2013 World Scientific Publishing Company.}, number = {2}, journal = {International Journal of Pattern Recognition and Artificial Intelligence}, author = {Truong, Quoc Bao and Pham, Van Huy and Lee, Byung Ryong}, year = {2013}, keywords = {Vision-based, decision tree (DT), different color method, genetic algorithm (GA), hypothesis generation (HG), hypothesis verification (HV), repair horizontal edges, support vector machine (SVM), vehicle detection, vertical symmetry axis detection}, }
@article{Lee2011, title = {Automatic thresholding selection for image segmentation based on genetic algorithm}, volume = {17}, issn = {19765622}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84861172295%7B%5C&%7DpartnerID=MN8TOARS}, doi = {10.5302/J.ICROS.2011.17.6.587}, abstract = {In this paper, we focus on the issue of automatic selection for multi-level threshold, and we greatly improve the efficiency of Otsu's method for image segmentation based on genetic algorithm. We have investigated and evaluated the performance of the Otsu and Valley-emphasis threshold methods. Based on this observation we propose a method for automatic threshold method that segments an image into more than two regions with high performance and processing in real-time. Our paper introduced new peak detection, combines with evolution algorithm using MAGA (Modified Adaptive Genetic Algorithm) and HCA (Hill Climbing Algorithm), to find the best threshold automatically, accurately, and quickly. The experimental results show that the proposed evolutionary algorithm achieves a satisfactory segmentation effect and that the processing time can be greatly reduced when the number of thresholds increases. © ICROS 2011.}, number = {6}, journal = {Journal of Institute of Control, Robotics and Systems}, author = {Lee, Byung Ryong and Truong, Quoc Bao and Pham, Van Huy and Kim, Hyoung Seok}, month = jun, year = {2011}, keywords = {Automatic threshold, Genetic algorithm, Image segmentation, Otsu's method, Valley-emphasis method}, pages = {587--595}, }