Parallel globally optimal structure learning of Bayesian networks. Nikolova, O., Zola, J., & Aluru, S. Journal of Parallel and Distributed Computing, 73(8):1039–1048, August 2013.
Parallel globally optimal structure learning of Bayesian networks [link]Website  doi  abstract   bibtex   
Given n random variables and a set of m observations of each of the n variables, the Bayesian network structure learning problem is to learn a directed acyclic graph (DAG) on the n variables such that the implied joint probability distribution best explains the set of observations. Bayesian networks are widely used in many fields including data mining and computational biology. Globally optimal (exact) structure learning of Bayesian networks takes O(n²·2ⁿ) time plus the cost of O(n·2ⁿ) evaluations of an application-specific scoring function whose run-time is at least linear in m. In this paper, we present a parallel algorithm for exact structure learning of a Bayesian network that is communication-efficient and work-optimal up to O((1/n)·2ⁿ) processors. We further extend this algorithm to the important restricted case of structure learning with bounded node in-degree and investigate the performance gains achievable because of limiting node in-degree. We demonstrate the applicability of our method by implementation on an IBM Blue Gene/P system and an AMD Opteron InfiniBand cluster and present experimental results that characterize run-time behavior with respect to the number of variables, number of observations, and the bound on in-degree.
@article{nikolova2013parallel,
  author   = {Nikolova, Olga and Zola, Jaroslaw and Aluru, Srinivas},
  title    = {Parallel Globally Optimal Structure Learning of {Bayesian} Networks},
  journal  = {Journal of Parallel and Distributed Computing},
  year     = {2013},
  month    = aug,
  volume   = {73},
  number   = {8},
  pages    = {1039--1048},
  doi      = {10.1016/j.jpdc.2013.04.001},
  url      = {http://www.sciencedirect.com/science/article/pii/S0743731513000622},
  urldate  = {2015-02-18},
  keywords = {Bayesian networks, Graphical models, Machine learning, Parallel algorithm, Structure learning},
  abstract = {Given $n$ random variables and a set of $m$ observations of each of the $n$ variables, the Bayesian network structure learning problem is to learn a directed acyclic graph (DAG) on the $n$ variables such that the implied joint probability distribution best explains the set of observations. Bayesian networks are widely used in many fields including data mining and computational biology. Globally optimal (exact) structure learning of Bayesian networks takes $O(n^2 \cdot 2^n)$ time plus the cost of $O(n \cdot 2^n)$ evaluations of an application-specific scoring function whose run-time is at least linear in $m$. In this paper, we present a parallel algorithm for exact structure learning of a Bayesian network that is communication-efficient and work-optimal up to $O(\frac{1}{n} \cdot 2^n)$ processors. We further extend this algorithm to the important restricted case of structure learning with bounded node in-degree and investigate the performance gains achievable because of limiting node in-degree. We demonstrate the applicability of our method by implementation on an IBM Blue Gene/P system and an AMD Opteron InfiniBand cluster and present experimental results that characterize run-time behavior with respect to the number of variables, number of observations, and the bound on in-degree.},
}

Downloads: 0