Abstract (BibTeX entry below)

Most of the Bayesian network-based classifiers are usually only able to handle discrete variables. However, most real-world domains involve continuous variables. A common practice to deal with continuous variables is to discretize them, with a subsequent loss of information. This work shows how discrete classifier induction algorithms can be adapted to the conditional Gaussian network paradigm to deal with continuous variables without discretizing them. In addition, three novel classifier induction algorithms and two new propositions about mutual information are introduced. The classifier induction algorithms presented are ordered and grouped according to their structural complexity: naive Bayes, tree augmented naive Bayes, k-dependence Bayesian classifiers and semi naive Bayes. All the classifier induction algorithms are empirically evaluated using predictive accuracy, and they are compared to linear discriminant analysis, as a continuous classic statistical benchmark classifier. Besides, the accuracies for a set of state-of-the-art classifiers are included in order to justify the use of linear discriminant analysis as the benchmark algorithm. In order to understand the behavior of the conditional Gaussian network-based classifiers better, the results include bias-variance decomposition of the expected misclassification rate. The study suggests that semi naive Bayes structure based classifiers and, especially, the novel wrapper condensed semi naive Bayes backward, outperform the behavior of the rest of the presented classifiers. They also obtain quite competitive results compared to the state-of-the-art algorithms included.

@article{perez2006supervised,
  title             = {Supervised classification with conditional {Gaussian} networks: Increasing the structure complexity from naive {Bayes}},
  author            = {P{\'e}rez, A. and Larra{\~n}aga, P. and Inza, I.},
  journal           = {International Journal of Approximate Reasoning},
  year              = {2006},
  month             = may,
  volume            = {43},
  number            = {1},
  pages             = {1--25},
  keywords          = {bayesian network,conditional gaussian network,filter,k-dependence bayesian classifiers,naive bayes,semi naive bayes,tree augmented naive bayes,wrapper},
  abstract          = {Most of the Bayesian network-based classifiers are usually only able to handle discrete variables. However, most real-world domains involve continuous variables. A common practice to deal with continuous variables is to discretize them, with a subsequent loss of information. This work shows how discrete classifier induction algorithms can be adapted to the conditional Gaussian network paradigm to deal with continuous variables without discretizing them. In addition, three novel classifier induction algorithms and two new propositions about mutual information are introduced. The classifier induction algorithms presented are ordered and grouped according to their structural complexity: naive Bayes, tree augmented naive Bayes, k-dependence Bayesian classifiers and semi naive Bayes. All the classifier induction algorithms are empirically evaluated using predictive accuracy, and they are compared to linear discriminant analysis, as a continuous classic statistical benchmark classifier. Besides, the accuracies for a set of state-of-the-art classifiers are included in order to justify the use of linear discriminant analysis as the benchmark algorithm. In order to understand the behavior of the conditional Gaussian network-based classifiers better, the results include bias-variance decomposition of the expected misclassification rate. The study suggests that semi naive Bayes structure based classifiers and, especially, the novel wrapper condensed semi naive Bayes backward, outperform the behavior of the rest of the presented classifiers. They also obtain quite competitive results compared to the state-of-the-art algorithms included.},
  type              = {article},
  id                = {849df324-6caf-3383-8a2c-c30fe242cf42},
  created           = {2021-06-02T14:47:14.336Z},
  file_attached     = {false},
  profile_id        = {3b531ebc-e490-366b-8004-15c8c731a6be},
  group_id          = {f2ba26fa-9910-3ec7-8a9f-d77fbd54c238},
  last_modified     = {2021-06-02T14:47:14.336Z},
  read              = {false},
  starred           = {false},
  authored          = {false},
  confirmed         = {false},
  hidden            = {false},
  source_type       = {article},
  private_publication = {false},
  bibtype           = {article},
}

Downloads: 0