You can create a new website with this list, or embed it in an existing web page by copying and pasting any of the following snippets.
JavaScript (easiest):

    <script src="https://bibbase.org/show?bib=https%3A%2F%2Fraw.githubusercontent.com%2FZhengTang1120%2Fzhengtang1120.github.io%2Fmain%2Fmy.bib&commas=true&jsonp=1"></script>
PHP:

    <?php
    $contents = file_get_contents("https://bibbase.org/show?bib=https%3A%2F%2Fraw.githubusercontent.com%2FZhengTang1120%2Fzhengtang1120.github.io%2Fmain%2Fmy.bib&commas=true&jsonp=1");
    print_r($contents);
    ?>
iFrame (not recommended):

    <iframe src="https://bibbase.org/show?bib=https%3A%2F%2Fraw.githubusercontent.com%2FZhengTang1120%2Fzhengtang1120.github.io%2Fmain%2Fmy.bib&commas=true&jsonp=1"></iframe>
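If you do use the iFrame variant, the embedded page does not adapt to the host layout, so you will usually need explicit sizing. A minimal sketch — the width, height, and border values here are illustrative assumptions, not anything BibBase requires:

    <!-- sizing attributes are illustrative; adjust to your layout -->
    <iframe src="https://bibbase.org/show?bib=https%3A%2F%2Fraw.githubusercontent.com%2FZhengTang1120%2Fzhengtang1120.github.io%2Fmain%2Fmy.bib&commas=true&jsonp=1"
            width="100%" height="800" style="border: none;"></iframe>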
For more details see the documentation.
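Putting it together, here is a minimal sketch of a stand-alone page that embeds the list with the JavaScript snippet above. The surrounding markup is illustrative; only the script tag is required. Because the feed ends in a document.write call, the bibliography renders at the exact point in the page where the tag appears:

    <!DOCTYPE html>
    <html>
      <head>
        <meta charset="utf-8">
        <title>Publications</title>
      </head>
      <body>
        <h1>Publications</h1>
        <!-- BibBase writes the rendered bibliography here, where the tag is placed -->
        <script src="https://bibbase.org/show?bib=https%3A%2F%2Fraw.githubusercontent.com%2FZhengTang1120%2Fzhengtang1120.github.io%2Fmain%2Fmy.bib&commas=true&jsonp=1"></script>
      </body>
    </html>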
2024 (3)
Alpagasus: Training a Better Alpaca Model with Fewer Data.
Chen, L., Li, S., Yan, J., Wang, H., Gunaratna, K., Yadav, V., Tang, Z., Srinivasan, V., Zhou, T., Huang, H., & Jin, H.
In The Twelfth International Conference on Learning Representations, 2024.
@inproceedings{chen2024alpagasus,
  title = {Alpagasus: Training a Better Alpaca Model with Fewer Data},
  author = {Lichang Chen and Shiyang Li and Jun Yan and Hai Wang and Kalpa Gunaratna and Vikas Yadav and Zheng Tang and Vijay Srinivasan and Tianyi Zhou and Heng Huang and Hongxia Jin},
  booktitle = {The Twelfth International Conference on Learning Representations},
  year = {2024},
  url = {https://openreview.net/forum?id=FdVXgSJhvz}
}
Instruction-following Evaluation through Verbalizer Manipulation.
Li, S., Yan, J., Wang, H., Tang, Z., Ren, X., Srinivasan, V., & Jin, H.
In 2024 Annual Conference of the North American Chapter of the Association for Computational Linguistics, 2024.
@inproceedings{li2024instructionfollowing,
  title = {Instruction-following Evaluation through Verbalizer Manipulation},
  author = {Shiyang Li and Jun Yan and Hai Wang and Zheng Tang and Xiang Ren and Vijay Srinivasan and Hongxia Jin},
  booktitle = {2024 Annual Conference of the North American Chapter of the Association for Computational Linguistics},
  year = {2024},
  url = {https://openreview.net/forum?id=sCeQOBtPmp}
}
Backdooring Instruction-Tuned Large Language Models with Virtual Prompt Injection.
Yan, J., Yadav, V., Li, S., Chen, L., Tang, Z., Wang, H., Srinivasan, V., Ren, X., & Jin, H.
In 2024 Annual Conference of the North American Chapter of the Association for Computational Linguistics, 2024.
@inproceedings{yan2024backdooring,
  title = {Backdooring Instruction-Tuned Large Language Models with Virtual Prompt Injection},
  author = {Jun Yan and Vikas Yadav and Shiyang Li and Lichang Chen and Zheng Tang and Hai Wang and Vijay Srinivasan and Xiang Ren and Hongxia Jin},
  booktitle = {2024 Annual Conference of the North American Chapter of the Association for Computational Linguistics},
  year = {2024},
  url = {https://openreview.net/forum?id=ucBWb6LKUH}
}
2023 (2)
It Takes Two Flints to Make a Fire: Multitask Learning of Neural Relation and Explanation Classifiers.
Tang, Z., & Surdeanu, M.
Computational Linguistics, 49(1): 117–156, March 2023.
@article{10.1162/coli_a_00463,
  author = {Tang, Zheng and Surdeanu, Mihai},
  title = {It Takes Two Flints to Make a Fire: Multitask Learning of Neural Relation and Explanation Classifiers},
  journal = {Computational Linguistics},
  volume = {49},
  number = {1},
  pages = {117--156},
  month = {03},
  year = {2023},
  issn = {0891-2017},
  doi = {10.1162/coli_a_00463},
  url = {https://doi.org/10.1162/coli_a_00463},
  eprint = {https://direct.mit.edu/coli/article-pdf/49/1/117/2068962/coli_a_00463.pdf}
}
Abstract: We propose an explainable approach for relation extraction that mitigates the tension between generalization and explainability by jointly training for the two goals. Our approach uses a multi-task learning architecture, which jointly trains a classifier for relation extraction, and a sequence model that labels words in the context of the relations that explain the decisions of the relation classifier. We also convert the model outputs to rules to bring global explanations to this approach. This sequence model is trained using a hybrid strategy: supervised, when supervision from pre-existing patterns is available, and semi-supervised otherwise. In the latter situation, we treat the sequence model's labels as latent variables, and learn the best assignment that maximizes the performance of the relation classifier. We evaluate the proposed approach on the two datasets and show that the sequence model provides labels that serve as accurate explanations for the relation classifier's decisions, and, importantly, that the joint training generally improves the performance of the relation classifier. We also evaluate the performance of the generated rules and show that the new rules are a great add-on to the manual rules and bring the rule-based system much closer to the neural models.
Bootstrapping Neural Relation and Explanation Classifiers.
Tang, Z., & Surdeanu, M.
In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 48–56, Toronto, Canada, July 2023. Association for Computational Linguistics.
@inproceedings{tang-surdeanu-2023-bootstrapping,
  title = {Bootstrapping Neural Relation and Explanation Classifiers},
  author = {Tang, Zheng and Surdeanu, Mihai},
  booktitle = {Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
  month = jul,
  year = {2023},
  address = {Toronto, Canada},
  publisher = {Association for Computational Linguistics},
  url = {https://aclanthology.org/2023.acl-short.5},
  pages = {48--56}
}
Abstract: We introduce a method that self trains (or bootstraps) neural relation and explanation classifiers. Our work expands the supervised approach of CITATION, which jointly trains a relation classifier with an explanation classifier that identifies context words important for the relation at hand, to semi-supervised scenarios. In particular, our approach iteratively converts the explainable models' outputs to rules and applies them to unlabeled text to produce new annotations. Our evaluation on the TACRED dataset shows that our method outperforms the rule-based model we started from by 15 F1 points, outperforms traditional self-training that relies just on the relation classifier by 5 F1 points, and performs comparably with the prompt-based approach of CITATION (without requiring an additional natural language inference component).
2022 (1)
Taxonomy Builder: a Data-driven and User-centric Tool for Streamlining Taxonomy Construction.
Surdeanu, M., Hungerford, J., Chan, Y. S., MacBride, J., Gyori, B., Zupon, A., Tang, Z., Qiu, H., Min, B., Zverev, Y., Hilverman, C., Thomas, M., Andrews, W., Alcock, K., Zhang, Z., Reynolds, M., Bethard, S., Sharp, R., & Laparra, E.
In Proceedings of the Second Workshop on Bridging Human–Computer Interaction and Natural Language Processing, pages 1–10, Seattle, Washington, July 2022. Association for Computational Linguistics.
@inproceedings{surdeanu-etal-2022-taxonomy,
  title = {Taxonomy Builder: a Data-driven and User-centric Tool for Streamlining Taxonomy Construction},
  author = {Surdeanu, Mihai and Hungerford, John and Chan, Yee Seng and MacBride, Jessica and Gyori, Benjamin and Zupon, Andrew and Tang, Zheng and Qiu, Haoling and Min, Bonan and Zverev, Yan and Hilverman, Caitlin and Thomas, Max and Andrews, Walter and Alcock, Keith and Zhang, Zeyu and Reynolds, Michael and Bethard, Steven and Sharp, Rebecca and Laparra, Egoitz},
  booktitle = {Proceedings of the Second Workshop on Bridging Human--Computer Interaction and Natural Language Processing},
  month = jul,
  year = {2022},
  address = {Seattle, Washington},
  publisher = {Association for Computational Linguistics},
  url = {https://aclanthology.org/2022.hcinlp-1.1},
  pages = {1--10}
}
2021 (2)
Interpretability Rules: Jointly Bootstrapping a Neural Relation Extractor with an Explanation Decoder.
Tang, Z., & Surdeanu, M.
In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: TrustNLP Workshop, 2021.
@inproceedings{zheng-tang-2021-edin,
  title = {Interpretability Rules: Jointly Bootstrapping a Neural Relation Extractor with an Explanation Decoder},
  author = {Tang, Zheng and Surdeanu, Mihai},
  booktitle = {Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: TrustNLP Workshop},
  year = {2021},
  url = {https://aclanthology.org/2021.trustnlp-1.1.pdf}
}
How May I Help You? Using Neural Text Simplification to Improve Downstream NLP Tasks.
Van, H., Tang, Z., & Surdeanu, M.
In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 4074–4080, Punta Cana, Dominican Republic, November 2021. Association for Computational Linguistics.
@inproceedings{van-etal-2021-may-help,
  title = {How May {I} Help You? Using Neural Text Simplification to Improve Downstream {NLP} Tasks},
  author = {Van, Hoang and Tang, Zheng and Surdeanu, Mihai},
  booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2021},
  month = nov,
  year = {2021},
  address = {Punta Cana, Dominican Republic},
  publisher = {Association for Computational Linguistics},
  url = {https://aclanthology.org/2021.findings-emnlp.343},
  pages = {4074--4080}
}
Abstract: The general goal of text simplification (TS) is to reduce text complexity for human consumption. In this paper, we investigate another potential use of neural TS: assisting machines performing natural language processing (NLP) tasks. We evaluate the use of neural TS in two ways: simplifying input texts at prediction time and augmenting data to provide machines with additional information during training. We demonstrate that the latter scenario provides positive effects on machine performance on two separate datasets. In particular, the latter use of TS improves the performances of LSTM (1.82–1.98%) and SpanBERT (0.7–1.3%) extractors on TACRED, a complex, large-scale, real-world relation extraction task. Further, the same setting yields improvements of up to 0.65% matched and 0.62% mismatched accuracies for a BERT text classifier on MNLI, a practical natural language inference dataset.
2020 (1)
Exploring Interpretability in Event Extraction: Multitask Learning of a Neural Event Classifier and an Explanation Decoder.
Tang, Z., Hahn-Powell, G., & Surdeanu, M.
In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: Student Research Workshop, Seattle, United States, July 2020. Association for Computational Linguistics.
@inproceedings{zheng-tang-2019-edin,
  title = {Exploring Interpretability in Event Extraction: Multitask Learning of a Neural Event Classifier and an Explanation Decoder},
  author = {Tang, Zheng and Hahn-Powell, Gustave and Surdeanu, Mihai},
  booktitle = {Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: Student Research Workshop},
  month = jul,
  year = {2020},
  address = {Seattle, United States},
  publisher = {Association for Computational Linguistics},
  url = {https://aclanthology.org/2020.acl-srw.23/}
}
2019 (1)
Eidos, INDRA, & Delphi: From Free Text to Executable Causal Models.
Sharp, R., Pyarelal, A., Gyori, B. M., Alcock, K., Laparra, E., Valenzuela-Escárcega, M. A., Nagesh, A., Yadav, V., Bachman, J. A., Tang, Z., Lent, H., Luo, F., Paul, M., Bethard, S., Barnard, K., Morrison, C., & Surdeanu, M.
In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations, 2019. Association for Computational Linguistics.
@inproceedings{N19-eidosdelphi,
  title = {Eidos, INDRA, \& Delphi: From Free Text to Executable Causal Models},
  author = {Sharp, Rebecca and Pyarelal, Adarsh and Gyori, Benjamin M. and Alcock, Keith and Laparra, Egoitz and Valenzuela-Esc{\'a}rcega, Marco A. and Nagesh, Ajay and Yadav, Vikas and Bachman, John A. and Tang, Zheng and Lent, Heather and Luo, Fan and Paul, Mithun and Bethard, Steven and Barnard, Kobus and Morrison, Clayton and Surdeanu, Mihai},
  booktitle = {Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations},
  year = {2019},
  publisher = {Association for Computational Linguistics},
  url = {https://aclanthology.org/N19-4008/}
}
2018 (1)
Alert Generation in Execution Monitoring Using Resource Envelopes.
Kumar, T. K. S., Xu, H., Tang, Z., Kumar, A., Rogers, C. M., & Knoblock, C. A.
In Proceedings of the 31st International FLAIRS Conference (FLAIRS), 2018.
@inproceedings{kumar2018,
  author = {Kumar, T. K. Satish and Xu, Hong and Tang, Zheng and Kumar, Anoop and Rogers, Craig Milo and Knoblock, Craig A.},
  title = {Alert Generation in Execution Monitoring Using Resource Envelopes},
  booktitle = {Proceedings of the 31st International FLAIRS Conference (FLAIRS)},
  year = {2018},
  url = {http://files.hong.me/papers/kumar2018.pdf}
}
2017 (1)
A Distributed Logical Filter for Connected Row Convex Constraints.
Kumar, T. K. S., Xu, H., Tang, Z., Kumar, A., Rogers, C. M., & Knoblock, C. A.
In Proceedings of the 29th IEEE International Conference on Tools with Artificial Intelligence (ICTAI), 2017.
@inproceedings{kumar2017,
  author = {Kumar, T. K. Satish and Xu, Hong and Tang, Zheng and Kumar, Anoop and Rogers, Craig Milo and Knoblock, Craig A.},
  title = {A Distributed Logical Filter for Connected Row Convex Constraints},
  booktitle = {Proceedings of the 29th IEEE International Conference on Tools with Artificial Intelligence (ICTAI)},
  year = {2017},
  url = {http://files.hong.me/papers/kumar2017.pdf}
}