IndoLEM and IndoBERT: A Benchmark Dataset and Pre-trained Language Model for Indonesian NLP. Koto, F., Rahimi, A., Lau, J. H., & Baldwin, T. In Scott, D., Bel, N., & Zong, C., editors, Proceedings of the 28th International Conference on Computational Linguistics, COLING 2020, Barcelona, Spain (Online), December 8-13, 2020, pages 757–770, 2020. International Committee on Computational Linguistics.
@inproceedings{DBLP:conf/coling/KotoRLB20,
  author       = {Fajri Koto and
                  Afshin Rahimi and
                  Jey Han Lau and
                  Timothy Baldwin},
  editor       = {Donia Scott and
                  N{\'{u}}ria Bel and
                  Chengqing Zong},
  title        = {IndoLEM and IndoBERT: {A} Benchmark Dataset and Pre-trained Language
                  Model for Indonesian {NLP}},
  booktitle    = {Proceedings of the 28th International Conference on Computational
                  Linguistics, {COLING} 2020, Barcelona, Spain (Online), December 8-13,
                  2020},
  pages        = {757--770},
  publisher    = {International Committee on Computational Linguistics},
  year         = {2020},
  url          = {https://doi.org/10.18653/v1/2020.coling-main.66},
  doi          = {10.18653/V1/2020.COLING-MAIN.66},
  timestamp    = {Fri, 06 Aug 2021 01:00:00 +0200},
  biburl       = {https://dblp.org/rec/conf/coling/KotoRLB20.bib},
  bibsource    = {dblp computer science bibliography, https://dblp.org}
}