Quasi-Recurrent Neural Networks. Bradbury, J., Merity, S., Xiong, C., & Socher, R. arXiv:1611.01576 [cs], November 2016.
Abstract: Recurrent neural networks are a powerful tool for modeling sequential data, but the dependence of each timestep’s computation on the previous timestep’s output limits parallelism and makes RNNs unwieldy for very long sequences. We introduce quasi-recurrent neural networks (QRNNs), an approach to neural sequence modeling that alternates convolutional layers, which apply in parallel across timesteps, and a minimalist recurrent pooling function that applies in parallel across channels. Despite lacking trainable recurrent layers, stacked QRNNs have better predictive accuracy than stacked LSTMs of the same hidden size. Due to their increased parallelism, they are up to 16 times faster at train and test time. Experiments on language modeling, sentiment classification, and character-level neural machine translation demonstrate these advantages and underline the viability of QRNNs as a basic building block for a variety of sequence tasks.
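The sketch below illustrates the mechanism the abstract describes: gate activations are produced by causal convolutions that can run in parallel over all timesteps, followed by a lightweight elementwise recurrence ("fo-pooling") over time. This is a minimal NumPy illustration based on the abstract and the paper's description, not the authors' reference implementation; the function names, filter width k=2, and random weights are assumptions for the example only.

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def causal_conv(x, w, b):
    """Masked (causal) 1-D convolution over the time axis.

    x: (T, d_in) input sequence
    w: (k, d_in, d_out) filter of width k, looking only at current/past steps
    b: (d_out,) bias
    returns: (T, d_out)
    """
    T, d_in = x.shape
    k, _, d_out = w.shape
    # Left-pad so the output at step t depends only on timesteps <= t.
    x_pad = np.concatenate([np.zeros((k - 1, d_in)), x], axis=0)
    out = np.empty((T, d_out))
    for t in range(T):
        window = x_pad[t:t + k]                          # (k, d_in)
        out[t] = np.einsum('ki,kio->o', window, w) + b
    return out

def qrnn_layer(x, params):
    """One QRNN layer: parallel convolutions, then sequential fo-pooling."""
    # Convolutional part: computable for all timesteps independently.
    z = np.tanh(causal_conv(x, params['Wz'], params['bz']))   # candidate
    f = sigmoid(causal_conv(x, params['Wf'], params['bf']))   # forget gate
    o = sigmoid(causal_conv(x, params['Wo'], params['bo']))   # output gate

    # Recurrent pooling part: elementwise across channels, cheap per step.
    T, d = z.shape
    c = np.zeros(d)
    h = np.empty((T, d))
    for t in range(T):
        c = f[t] * c + (1.0 - f[t]) * z[t]   # fo-pooling cell update
        h[t] = o[t] * c
    return h

# Tiny usage example with random weights (illustrative only).
rng = np.random.default_rng(0)
T, d_in, d_hid, k = 5, 4, 8, 2
params = {name: rng.standard_normal(shape) * 0.1
          for name, shape in [('Wz', (k, d_in, d_hid)),
                              ('Wf', (k, d_in, d_hid)),
                              ('Wo', (k, d_in, d_hid))]}
params.update({'bz': np.zeros(d_hid), 'bf': np.zeros(d_hid), 'bo': np.zeros(d_hid)})
h = qrnn_layer(rng.standard_normal((T, d_in)), params)
print(h.shape)  # (5, 8)
```

Only the short per-timestep loop in the pooling step is sequential; everything with trainable weights (the convolutions) is applied across all timesteps at once, which is the source of the speedup the abstract reports.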
@article{bradbury_quasi-recurrent_2016,
title = {Quasi-{Recurrent} {Neural} {Networks}},
url = {http://arxiv.org/abs/1611.01576},
abstract = {Recurrent neural networks are a powerful tool for modeling sequential data, but the dependence of each timestep’s computation on the previous timestep’s output limits parallelism and makes RNNs unwieldy for very long sequences. We introduce quasi-recurrent neural networks (QRNNs), an approach to neural sequence modeling that alternates convolutional layers, which apply in parallel across timesteps, and a minimalist recurrent pooling function that applies in parallel across channels. Despite lacking trainable recurrent layers, stacked QRNNs have better predictive accuracy than stacked LSTMs of the same hidden size. Due to their increased parallelism, they are up to 16 times faster at train and test time. Experiments on language modeling, sentiment classification, and character-level neural machine translation demonstrate these advantages and underline the viability of QRNNs as a basic building block for a variety of sequence tasks.},
language = {en},
urldate = {2022-01-19},
journal = {arXiv:1611.01576 [cs]},
author = {Bradbury, James and Merity, Stephen and Xiong, Caiming and Socher, Richard},
month = nov,
year = {2016},
note = {arXiv: 1611.01576},
keywords = {Computer Science - Artificial Intelligence, Computer Science - Computation and Language, Computer Science - Machine Learning, Computer Science - Neural and Evolutionary Computing},
}
{"_id":"5uxciucybLkpYqztL","bibbaseid":"bradbury-merity-xiong-socher-quasirecurrentneuralnetworks-2016","author_short":["Bradbury, J.","Merity, S.","Xiong, C.","Socher, R."],"bibdata":{"bibtype":"article","type":"article","title":"Quasi-Recurrent Neural Networks","url":"http://arxiv.org/abs/1611.01576","abstract":"Recurrent neural networks are a powerful tool for modeling sequential data, but the dependence of each timestep’s computation on the previous timestep’s output limits parallelism and makes RNNs unwieldy for very long sequences. We introduce quasi-recurrent neural networks (QRNNs), an approach to neural sequence modeling that alternates convolutional layers, which apply in parallel across timesteps, and a minimalist recurrent pooling function that applies in parallel across channels. Despite lacking trainable recurrent layers, stacked QRNNs have better predictive accuracy than stacked LSTMs of the same hidden size. Due to their increased parallelism, they are up to 16 times faster at train and test time. Experiments on language modeling, sentiment classification, and character-level neural machine translation demonstrate these advantages and underline the viability of QRNNs as a basic building block for a variety of sequence tasks.","language":"en","urldate":"2022-01-19","journal":"arXiv:1611.01576 [cs]","author":[{"propositions":[],"lastnames":["Bradbury"],"firstnames":["James"],"suffixes":[]},{"propositions":[],"lastnames":["Merity"],"firstnames":["Stephen"],"suffixes":[]},{"propositions":[],"lastnames":["Xiong"],"firstnames":["Caiming"],"suffixes":[]},{"propositions":[],"lastnames":["Socher"],"firstnames":["Richard"],"suffixes":[]}],"month":"November","year":"2016","note":"arXiv: 1611.01576","keywords":"/unread, Computer Science - Artificial Intelligence, Computer Science - Computation and Language, Computer Science - Machine Learning, Computer Science - Neural and Evolutionary Computing, ⛔ No DOI found","bibtex":"@article{bradbury_quasi-recurrent_2016,\n\ttitle = {Quasi-{Recurrent} {Neural} {Networks}},\n\turl = {http://arxiv.org/abs/1611.01576},\n\tabstract = {Recurrent neural networks are a powerful tool for modeling sequential data, but the dependence of each timestep’s computation on the previous timestep’s output limits parallelism and makes RNNs unwieldy for very long sequences. We introduce quasi-recurrent neural networks (QRNNs), an approach to neural sequence modeling that alternates convolutional layers, which apply in parallel across timesteps, and a minimalist recurrent pooling function that applies in parallel across channels. Despite lacking trainable recurrent layers, stacked QRNNs have better predictive accuracy than stacked LSTMs of the same hidden size. Due to their increased parallelism, they are up to 16 times faster at train and test time. 
Experiments on language modeling, sentiment classification, and character-level neural machine translation demonstrate these advantages and underline the viability of QRNNs as a basic building block for a variety of sequence tasks.},\n\tlanguage = {en},\n\turldate = {2022-01-19},\n\tjournal = {arXiv:1611.01576 [cs]},\n\tauthor = {Bradbury, James and Merity, Stephen and Xiong, Caiming and Socher, Richard},\n\tmonth = nov,\n\tyear = {2016},\n\tnote = {arXiv: 1611.01576},\n\tkeywords = {/unread, Computer Science - Artificial Intelligence, Computer Science - Computation and Language, Computer Science - Machine Learning, Computer Science - Neural and Evolutionary Computing, ⛔ No DOI found},\n}\n\n","author_short":["Bradbury, J.","Merity, S.","Xiong, C.","Socher, R."],"key":"bradbury_quasi-recurrent_2016","id":"bradbury_quasi-recurrent_2016","bibbaseid":"bradbury-merity-xiong-socher-quasirecurrentneuralnetworks-2016","role":"author","urls":{"Paper":"http://arxiv.org/abs/1611.01576"},"keyword":["/unread","Computer Science - Artificial Intelligence","Computer Science - Computation and Language","Computer Science - Machine Learning","Computer Science - Neural and Evolutionary Computing","⛔ No DOI found"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"article","biburl":"https://bibbase.org/zotero/victorjhu","dataSources":["CmHEoydhafhbkXXt5"],"keywords":["/unread","computer science - artificial intelligence","computer science - computation and language","computer science - machine learning","computer science - neural and evolutionary computing","⛔ no doi found"],"search_terms":["quasi","recurrent","neural","networks","bradbury","merity","xiong","socher"],"title":"Quasi-Recurrent Neural Networks","year":2016}