VIBNN: Hardware Acceleration of Bayesian Neural Networks. Cai, R., Ren, A., Liu, N., Ding, C., Wang, L., Qian, X., Pedram, M., & Wang, Y. In Proceedings of the 23rd International Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS '18), pages 476–488, 2018. arXiv: 1802.00822
Bayesian Neural Networks (BNNs) have been proposed to address the problem of model uncertainty in training and inference. By introducing weights associated with conditioned probability distributions, BNNs are capable of resolving the overfitting issue commonly seen in conventional neural networks and allow for small-data training through the variational inference process. Frequent usage of Gaussian random variables in this process requires a properly optimized Gaussian Random Number Generator (GRNG). The high hardware cost of a conventional GRNG makes the hardware implementation of BNNs challenging. In this paper, we propose VIBNN, an FPGA-based hardware accelerator design for variational inference on BNNs. We explore the design space for the massive number of Gaussian-variable sampling tasks in BNNs. Specifically, we introduce two high-performance Gaussian (pseudo) random number generators: the RAM-based Linear Feedback Gaussian Random Number Generator (RLF-GRNG), which is inspired by the properties of the binomial distribution and linear feedback logic; and the Bayesian Neural Network-oriented Wallace Gaussian Random Number Generator. To achieve high scalability and efficient memory access, we propose a deeply pipelined accelerator architecture with fast execution and good hardware utilization. Experimental results demonstrate that the proposed VIBNN implementations on an FPGA can achieve a throughput of 321,543.4 Images/s and an energy efficiency of up to 52,694.8 Images/J while maintaining accuracy similar to its software counterpart.
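For intuition, the two ideas the abstract combines can be sketched in a few lines of Python: variational inference draws each weight via the reparameterization w = mu + sigma * eps, and a binomial-distribution-based GRNG such as RLF-GRNG obtains eps cheaply via the central limit theorem, since a sum of n fair random bits is Binomial(n, 1/2) and, once standardized, approximately N(0, 1). This is a minimal software sketch under those assumptions, not the paper's hardware design; the function names and the 256-bit width are illustrative.

import numpy as np

def clt_gaussian(bits):
    # Sum of n fair bits is Binomial(n, 0.5), approximately N(n/2, n/4)
    # for large n; standardizing yields a near-standard-normal sample.
    n = bits.size
    return (bits.sum() - n / 2.0) / np.sqrt(n / 4.0)

def sample_bnn_weight(mu, sigma, rng, n_bits=256):
    # Reparameterized draw w = mu + sigma * eps used in variational BNN
    # inference; eps comes from the bit-sum approximation above. In the
    # paper's hardware the bits would be produced by linear-feedback
    # logic rather than a software PRNG (illustrative substitution).
    bits = rng.integers(0, 2, size=n_bits)
    return mu + sigma * clt_gaussian(bits)

rng = np.random.default_rng(0)
print(sample_bnn_weight(0.1, 0.05, rng))  # one weight draw

Each forward pass of a BNN redraws every weight this way, which is why throughput on the accelerator hinges on generating Gaussian variates cheaply and massively in parallel.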
@inproceedings{cai_vibnn:_2018,
	title = {{VIBNN}: Hardware Acceleration of {Bayesian} Neural Networks},
	shorttitle = {{VIBNN}},
	url = {http://arxiv.org/abs/1802.00822},
	doi = {10.1145/3173162.3173212},
	urldate = {2018-06-07},
	booktitle = {Proceedings of the 23rd International Conference on Architectural Support for Programming Languages and Operating Systems ({ASPLOS} '18)},
	author = {Cai, Ruizhe and Ren, Ao and Liu, Ning and Ding, Caiwen and Wang, Luhao and Qian, Xuehai and Pedram, Massoud and Wang, Yanzhi},
	year = {2018},
	note = {arXiv: 1802.00822},
	pages = {476--488}
}
