A Tutorial on Learning with Bayesian Networks. Heckerman, D. In Holmes, D. E. & Jain, L. C., editors, Innovations in Bayesian Networks: Theory and Applications, Studies in Computational Intelligence series, pages 33–82. Springer, Berlin, Heidelberg, 2008. Abstract: A Bayesian network is a graphical model that encodes probabilistic relationships among variables of interest. When used in conjunction with statistical techniques, the graphical model has several advantages for data analysis. One, because the model encodes dependencies among all variables, it readily handles situations where some data entries are missing. Two, a Bayesian network can be used to learn causal relationships, and hence can be used to gain understanding about a problem domain and to predict the consequences of intervention. Three, because the model has both a causal and probabilistic semantics, it is an ideal representation for combining prior knowledge (which often comes in causal form) and data. Four, Bayesian statistical methods in conjunction with Bayesian networks offer an efficient and principled approach for avoiding the overfitting of data. In this paper, we discuss methods for constructing Bayesian networks from prior knowledge and summarize Bayesian statistical methods for using data to improve these models. With regard to the latter task, we describe methods for learning both the parameters and structure of a Bayesian network, including techniques for learning with incomplete data. In addition, we relate Bayesian-network methods for learning to techniques for supervised and unsupervised learning. We illustrate the graphical-modeling approach using a real-world case study.
@incollection{heckerman_tutorial_2008,
  author    = {Heckerman, David},
  editor    = {Holmes, Dawn E. and Jain, Lakhmi C.},
  title     = {A Tutorial on Learning with {Bayesian} Networks},
  booktitle = {Innovations in {Bayesian} Networks: Theory and Applications},
  series    = {Studies in Computational Intelligence},
  publisher = {Springer},
  address   = {Berlin, Heidelberg},
  year      = {2008},
  pages     = {33--82},
  isbn      = {978-3-540-85066-3},
  doi       = {10.1007/978-3-540-85066-3_3},
  language  = {en},
  keywords  = {Bayesian Network, Hide Variable, Intelligence Quotient, Marginal Likelihood, Network Structure, bn, tutorial},
  abstract  = {A Bayesian network is a graphical model that encodes probabilistic relationships among variables of interest. When used in conjunction with statistical techniques, the graphical model has several advantages for data analysis. One, because the model encodes dependencies among all variables, it readily handles situations where some data entries are missing. Two, a Bayesian network can be used to learn causal relationships, and hence can be used to gain understanding about a problem domain and to predict the consequences of intervention. Three, because the model has both a causal and probabilistic semantics, it is an ideal representation for combining prior knowledge (which often comes in causal form) and data. Four, Bayesian statistical methods in conjunction with Bayesian networks offer an efficient and principled approach for avoiding the overfitting of data. In this paper, we discuss methods for constructing Bayesian networks from prior knowledge and summarize Bayesian statistical methods for using data to improve these models. With regard to the latter task, we describe methods for learning both the parameters and structure of a Bayesian network, including techniques for learning with incomplete data. In addition, we relate Bayesian-network methods for learning to techniques for supervised and unsupervised learning. We illustrate the graphical-modeling approach using a real-world case study.},
}
Downloads: 0
{"_id":"ActTeaKJFM7b5pNP4","bibbaseid":"heckerman-atutorialonlearningwithbayesiannetworks-2008","downloads":0,"creationDate":"2017-04-19T15:50:25.598Z","title":"A Tutorial on Learning with Bayesian Networks","author_short":["Heckerman, D."],"year":2008,"bibtype":"incollection","biburl":"https://bibbase.org/zotero/mh_lenguyen","bibdata":{"bibtype":"incollection","type":"incollection","address":"Berlin, Heidelberg","series":"Studies in Computational Intelligence","title":"A Tutorial on Learning with Bayesian Networks","isbn":"978-3-540-85066-3","url":"https://doi.org/10.1007/978-3-540-85066-3_3","abstract":"A Bayesian network is a graphical model that encodes probabilistic relationships among variables of interest. When used in conjunction with statistical techniques, the graphical model has several advantages for data analysis. One, because the model encodes dependencies among all variables, it readily handles situations where some data entries are missing. Two, a Bayesian network can be used to learn causal relationships, and hence can be used to gain understanding about a problem domain and to predict the consequences of intervention. Three, because the model has both a causal and probabilistic semantics, it is an ideal representation for combining prior knowledge (which often comes in causal form) and data. Four, Bayesian statistical methods in conjunction with Bayesian networks offer an efficient and principled approach for avoiding the overfitting of data. In this paper, we discuss methods for constructing Bayesian networks from prior knowledge and summarize Bayesian statistical methods for using data to improve these models. With regard to the latter task, we describe methods for learning both the parameters and structure of a Bayesian network, including techniques for learning with incomplete data. In addition, we relate Bayesian-network methods for learning to techniques for supervised and unsupervised learning. 
We illustrate the graphical-modeling approach using a real-world case study.","language":"en","urldate":"2021-11-17","booktitle":"Innovations in Bayesian Networks: Theory and Applications","publisher":"Springer","author":[{"propositions":[],"lastnames":["Heckerman"],"firstnames":["David"],"suffixes":[]}],"editor":[{"propositions":[],"lastnames":["Holmes"],"firstnames":["Dawn","E."],"suffixes":[]},{"propositions":[],"lastnames":["Jain"],"firstnames":["Lakhmi","C."],"suffixes":[]}],"year":"2008","doi":"10.1007/978-3-540-85066-3_3","keywords":"Bayesian Network, Hide Variable, Intelligence Quotient, Marginal Likelihood, Network Structure, bn, tutorial","pages":"33–82","bibtex":"@incollection{heckerman_tutorial_2008,\n\taddress = {Berlin, Heidelberg},\n\tseries = {Studies in {Computational} {Intelligence}},\n\ttitle = {A {Tutorial} on {Learning} with {Bayesian} {Networks}},\n\tisbn = {978-3-540-85066-3},\n\turl = {https://doi.org/10.1007/978-3-540-85066-3_3},\n\tabstract = {A Bayesian network is a graphical model that encodes probabilistic relationships among variables of interest. When used in conjunction with statistical techniques, the graphical model has several advantages for data analysis. One, because the model encodes dependencies among all variables, it readily handles situations where some data entries are missing. Two, a Bayesian network can be used to learn causal relationships, and hence can be used to gain understanding about a problem domain and to predict the consequences of intervention. Three, because the model has both a causal and probabilistic semantics, it is an ideal representation for combining prior knowledge (which often comes in causal form) and data. Four, Bayesian statistical methods in conjunction with Bayesian networks offer an efficient and principled approach for avoiding the overfitting of data. 
In this paper, we discuss methods for constructing Bayesian networks from prior knowledge and summarize Bayesian statistical methods for using data to improve these models. With regard to the latter task, we describe methods for learning both the parameters and structure of a Bayesian network, including techniques for learning with incomplete data. In addition, we relate Bayesian-network methods for learning to techniques for supervised and unsupervised learning. We illustrate the graphical-modeling approach using a real-world case study.},\n\tlanguage = {en},\n\turldate = {2021-11-17},\n\tbooktitle = {Innovations in {Bayesian} {Networks}: {Theory} and {Applications}},\n\tpublisher = {Springer},\n\tauthor = {Heckerman, David},\n\teditor = {Holmes, Dawn E. and Jain, Lakhmi C.},\n\tyear = {2008},\n\tdoi = {10.1007/978-3-540-85066-3_3},\n\tkeywords = {Bayesian Network, Hide Variable, Intelligence Quotient, Marginal Likelihood, Network Structure, bn, tutorial},\n\tpages = {33--82},\n}\n\n\n\n","author_short":["Heckerman, D."],"editor_short":["Holmes, D. E.","Jain, L. C."],"key":"heckerman_tutorial_2008","id":"heckerman_tutorial_2008","bibbaseid":"heckerman-atutorialonlearningwithbayesiannetworks-2008","role":"author","urls":{"Paper":"https://doi.org/10.1007/978-3-540-85066-3_3"},"keyword":["Bayesian Network","Hide Variable","Intelligence Quotient","Marginal Likelihood","Network Structure","bn","tutorial"],"metadata":{"authorlinks":{}},"downloads":0,"html":""},"search_terms":["tutorial","learning","bayesian","networks","heckerman"],"keywords":["bayesian network","hide variable","intelligence quotient","marginal likelihood","network structure","bn","tutorial"],"authorIDs":[],"dataSources":["M66iSiMeC2pAhHPfr","iwKepCrWBps7ojhDx","jiRYq6eM8MaP6gkgG"]}