\n \n \n
\n
\n\n \n \n \n \n \n \n The ToMCAT Dataset.\n \n \n \n \n\n\n \n Adarsh Pyarelal; Eric Duong; Caleb Jones Shibu; Paulo Soares; Savannah Boyd; Payal Khosla; Valeria Pfeifer; Diheng Zhang; Eric S Andrews; Rick Champlin; Vincent Paul Raymond; Meghavarshini Krishnaswamy; Clayton Morrison; Emily Butler; and Kobus Barnard.\n\n\n \n\n\n\n In
Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023. \n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{pyarelal2023the,
  title     = {The {ToMCAT} Dataset},
  author    = {Pyarelal, Adarsh and Duong, Eric and Shibu, Caleb Jones and Soares, Paulo and Boyd, Savannah and Khosla, Payal and Pfeifer, Valeria and Zhang, Diheng and Andrews, Eric S and Champlin, Rick and Raymond, Vincent Paul and Krishnaswamy, Meghavarshini and Morrison, Clayton and Butler, Emily and Barnard, Kobus},
  booktitle = {Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track},
  year      = {2023},
  url       = {https://openreview.net/forum?id=ZJWQfgXQb6},
}
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Who is Speaking? Speaker-Aware Multiparty Dialogue Act Classification.\n \n \n \n \n\n\n \n Ayesha Qamar; Adarsh Pyarelal; and Ruihong Huang.\n\n\n \n\n\n\n In Houda Bouamor; Juan Pino; and Kalika Bali., editor(s),
Findings of the Association for Computational Linguistics: EMNLP 2023, pages 10122–10135, Singapore, December 2023. Association for Computational Linguistics\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{qamar-etal-2023-speaking,
  title     = {Who is Speaking? Speaker-Aware Multiparty Dialogue Act Classification},
  author    = {Qamar, Ayesha and Pyarelal, Adarsh and Huang, Ruihong},
  editor    = {Bouamor, Houda and Pino, Juan and Bali, Kalika},
  booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2023},
  month     = dec,
  year      = {2023},
  address   = {Singapore},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.findings-emnlp.678},
  pages     = {10122--10135},
  abstract  = {Utterances do not occur in isolation in dialogues; it is essential to have the information of who the speaker of an utterance is to be able to recover the speaker{'}s intention with respect to the surrounding context. Beyond simply capturing speaker switches, identifying how speakers interact with each other in a dialogue is crucial to understanding conversational flow. This becomes increasingly important and simultaneously difficult to model when more than two interlocutors take part in a conversation. To overcome this challenge, we propose to explicitly add speaker awareness to each utterance representation. To that end, we use a graph neural network to model how each speaker is behaving within the local context of a conversation. The speaker representations learned this way are then used to update their respective utterance representations. We experiment with both multiparticipant and dyadic conversations on the MRDA and SwDA datasets and show the effectiveness of our approach.},
}
\n
\n\n\n
\n Utterances do not occur in isolation in dialogues; it is essential to have the information of who the speaker of an utterance is to be able to recover the speaker's intention with respect to the surrounding context. Beyond simply capturing speaker switches, identifying how speakers interact with each other in a dialogue is crucial to understanding conversational flow. This becomes increasingly important and simultaneously difficult to model when more than two interlocutors take part in a conversation. To overcome this challenge, we propose to explicitly add speaker awareness to each utterance representation. To that end, we use a graph neural network to model how each speaker is behaving within the local context of a conversation. The speaker representations learned this way are then used to update their respective utterance representations. We experiment with both multiparticipant and dyadic conversations on the MRDA and SwDA datasets and show the effectiveness of our approach.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Hierarchical Fusion for Online Multimodal Dialog Act Classification.\n \n \n \n \n\n\n \n Md Messal Monem Miah; Adarsh Pyarelal; and Ruihong Huang.\n\n\n \n\n\n\n In Houda Bouamor; Juan Pino; and Kalika Bali., editor(s),
Findings of the Association for Computational Linguistics: EMNLP 2023, pages 7532–7545, Singapore, December 2023. Association for Computational Linguistics\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{miah-etal-2023-hierarchical,
  title     = {Hierarchical Fusion for Online Multimodal Dialog Act Classification},
  author    = {Miah, Md Messal Monem and Pyarelal, Adarsh and Huang, Ruihong},
  editor    = {Bouamor, Houda and Pino, Juan and Bali, Kalika},
  booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2023},
  month     = dec,
  year      = {2023},
  address   = {Singapore},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.findings-emnlp.505},
  pages     = {7532--7545},
  abstract  = {We propose a framework for online multimodal dialog act (DA) classification based on raw audio and ASR-generated transcriptions of current and past utterances. Existing multimodal DA classification approaches are limited by ineffective audio modeling and late-stage fusion. We showcase significant improvements in multimodal DA classification by integrating modalities at a more granular level and incorporating recent advancements in large language and audio models for audio feature extraction. We further investigate the effectiveness of self-attention and cross-attention mechanisms in modeling utterances and dialogs for DA classification. We achieve a substantial increase of 3 percentage points in the F1 score relative to current state-of-the-art models on two prominent DA classification datasets, MRDA and EMOTyDA.},
}
\n
\n\n\n
\n We propose a framework for online multimodal dialog act (DA) classification based on raw audio and ASR-generated transcriptions of current and past utterances. Existing multimodal DA classification approaches are limited by ineffective audio modeling and late-stage fusion. We showcase significant improvements in multimodal DA classification by integrating modalities at a more granular level and incorporating recent advancements in large language and audio models for audio feature extraction. We further investigate the effectiveness of self-attention and cross-attention mechanisms in modeling utterances and dialogs for DA classification. We achieve a substantial increase of 3 percentage points in the F1 score relative to current state-of-the-art models on two prominent DA classification datasets, MRDA and EMOTyDA.\n
\n\n\n
\n\n\n \n\n\n
\n
\n\n \n \n \n \n \n \n Transferring Legal Natural Language Inference Model from a US State to Another: What Makes It So Hard?.\n \n \n \n \n\n\n \n Alice Kwak; Gaetano Forte; Derek Bambauer; and Mihai Surdeanu.\n\n\n \n\n\n\n In
Proceedings of the Natural Legal Language Processing Workshop 2023, December 2023. \n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{kwak-et-al-nllp2023-error-analysis,
  title     = {Transferring Legal Natural Language Inference Model from a {US} State to Another: What Makes It So Hard?},
  author    = {Kwak, Alice and Forte, Gaetano and Bambauer, Derek and Surdeanu, Mihai},
  booktitle = {Proceedings of the Natural Legal Language Processing Workshop 2023},
  month     = dec,
  year      = {2023},
  url       = {https://clulab.org/papers/nllp2023_kwak-et-al.pdf},
  abstract  = {This study investigates whether a legal natural language inference (NLI) model trained on the data from one US state can be transferred to another state. We fine-tuned a pre-trained model on the task of evaluating the validity of legal will statements, once with the dataset containing the Tennessee wills and once with the dataset containing the Idaho wills. Each model{'}s performance on the in-domain setting and the out-of-domain setting are compared to see if the models can across the states. We found that the model trained on one US state can be mostly transferred to another state. However, it is clear that the model{'}s performance drops in the out-of-domain setting. The F1 scores of the Tennessee model and the Idaho model are 96.41 and 92.03 when predicting the data from the same state, but they drop to 66.32 and 81.60 when predicting the data from another state. Subsequent error analysis revealed that there are two major sources of errors. First, the model fails to recognize equivalent laws across states when there are stylistic differences between laws. Second, difference in statutory section numbering system between the states makes it difficult for the model to locate laws relevant to the cases being predicted on. This analysis provides insights on how the future NLI system can be improved. Also, our findings offer empirical support to legal experts advocating the standardization of legal documents.},
}
\n
\n\n\n
\n This study investigates whether a legal natural language inference (NLI) model trained on the data from one US state can be transferred to another state. We fine-tuned a pre-trained model on the task of evaluating the validity of legal will statements, once with the dataset containing the Tennessee wills and once with the dataset containing the Idaho wills. Each model’s performance on the in-domain setting and the out-of-domain setting are compared to see if the models can across the states. We found that the model trained on one US state can be mostly transferred to another state. However, it is clear that the model’s performance drops in the out-of-domain setting. The F1 scores of the Tennessee model and the Idaho model are 96.41 and 92.03 when predicting the data from the same state, but they drop to 66.32 and 81.60 when predicting the data from another state. Subsequent error analysis revealed that there are two major sources of errors. First, the model fails to recognize equivalent laws across states when there are stylistic differences between laws. Second, difference in statutory section numbering system between the states makes it difficult for the model to locate laws relevant to the cases being predicted on. This analysis provides insights on how the future NLI system can be improved. Also, our findings offer empirical support to legal experts advocating the standardization of legal documents.\n
\n\n\n
\n\n\n \n\n\n
\n
\n\n \n \n \n \n \n \n Improving Zero-shot Relation Classification via Automatically-acquired Entailment Templates.\n \n \n \n \n\n\n \n Mahdi Rahimi; and Mihai Surdeanu.\n\n\n \n\n\n\n In
Proceedings of the 8th Workshop on Representation Learning for NLP (RepL4NLP 2023), pages 187–195, Toronto, Canada, July 2023. Association for Computational Linguistics\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{rahimi-surdeanu-2023-improving,
  title     = {Improving Zero-shot Relation Classification via Automatically-acquired Entailment Templates},
  author    = {Rahimi, Mahdi and Surdeanu, Mihai},
  booktitle = {Proceedings of the 8th Workshop on Representation Learning for NLP (RepL4NLP 2023)},
  month     = jul,
  year      = {2023},
  address   = {Toronto, Canada},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.repl4nlp-1.16},
  pages     = {187--195},
  abstract  = {While fully supervised relation classification (RC) models perform well on large-scale datasets, their performance drops drastically in low-resource settings. As generating annotated examples are expensive, recent zero-shot methods have been proposed that reformulate RC into other NLP tasks for which supervision exists such as textual entailment. However, these methods rely on templates that are manually created which is costly and requires domain expertise. In this paper, we present a novel strategy for template generation for relation classification, which is based on adapting Harris{'} distributional similarity principle to templates encoded using contextualized representations. Further, we perform empirical evaluation of different strategies for combining the automatically acquired templates with manual templates. The experimental results on TACRED show that our approach not only performs better than the zero-shot RC methods that only use manual templates, but also that it achieves state-of-the-art performance for zero-shot TACRED at 64.3 F1 score.},
}
\n
\n\n\n
\n While fully supervised relation classification (RC) models perform well on large-scale datasets, their performance drops drastically in low-resource settings. As generating annotated examples are expensive, recent zero-shot methods have been proposed that reformulate RC into other NLP tasks for which supervision exists such as textual entailment. However, these methods rely on templates that are manually created which is costly and requires domain expertise. In this paper, we present a novel strategy for template generation for relation classification, which is based on adapting Harris' distributional similarity principle to templates encoded using contextualized representations. Further, we perform empirical evaluation of different strategies for combining the automatically acquired templates with manual templates. The experimental results on TACRED show that our approach not only performs better than the zero-shot RC methods that only use manual templates, but also that it achieves state-of-the-art performance for zero-shot TACRED at 64.3 F1 score.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n It's not Sexually Suggestive; It's Educative | Separating Sex Education from Suggestive Content on TikTok Videos.\n \n \n \n \n\n\n \n Enfa George; and Mihai Surdeanu.\n\n\n \n\n\n\n In
Findings of the Association for Computational Linguistics: ACL 2023, pages 5904–5915, Toronto, Canada, July 2023. Association for Computational Linguistics\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{george-surdeanu-2023-sexually,
  title     = {It{'}s not Sexually Suggestive; It{'}s Educative | Separating Sex Education from Suggestive Content on {TikTok} Videos},
  author    = {George, Enfa and Surdeanu, Mihai},
  booktitle = {Findings of the Association for Computational Linguistics: ACL 2023},
  month     = jul,
  year      = {2023},
  address   = {Toronto, Canada},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.findings-acl.365},
  pages     = {5904--5915},
  abstract  = {We introduce SexTok, a multi-modal dataset composed of TikTok videos labeled as sexually suggestive (from the annotator{'}s point of view), sex-educational content, or neither. Such a dataset is necessary to address the challenge of distinguishing between sexually suggestive content and virtual sex education videos on TikTok. Children{'}s exposure to sexually suggestive videos has been shown to have adversarial effects on their development (Collins et al. 2017). Meanwhile, virtual sex education, especially on subjects that are more relevant to the LGBTQIA+ community, is very valuable (Mitchell et al. 2014). The platform{'}s current system removes/punishes some of both types of videos, even though they serve different purposes. Our dataset contains video URLs, and it is also audio transcribed. To validate its importance, we explore two transformer-based models for classifying the videos. Our preliminary results suggest that the task of distinguishing between these types of videos is learnable but challenging. These experiments suggest that this dataset is meaningful and invites further study on the subject.},
}
\n
\n\n\n
\n We introduce SexTok, a multi-modal dataset composed of TikTok videos labeled as sexually suggestive (from the annotator's point of view), sex-educational content, or neither. Such a dataset is necessary to address the challenge of distinguishing between sexually suggestive content and virtual sex education videos on TikTok. Children's exposure to sexually suggestive videos has been shown to have adversarial effects on their development (Collins et al. 2017). Meanwhile, virtual sex education, especially on subjects that are more relevant to the LGBTQIA+ community, is very valuable (Mitchell et al. 2014). The platform's current system removes/punishes some of both types of videos, even though they serve different purposes. Our dataset contains video URLs, and it is also audio transcribed. To validate its importance, we explore two transformer-based models for classifying the videos. Our preliminary results suggest that the task of distinguishing between these types of videos is learnable but challenging. These experiments suggest that this dataset is meaningful and invites further study on the subject.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n It Takes Two Flints to Make a Fire: Multitask Learning of Neural Relation and Explanation Classifiers.\n \n \n \n \n\n\n \n Zheng Tang; and Mihai Surdeanu.\n\n\n \n\n\n\n
Computational Linguistics, 49(1): 117-156. 03 2023.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n \n \n 39 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{10.1162/coli_a_00463,
  author   = {Tang, Zheng and Surdeanu, Mihai},
  title    = {It Takes Two Flints to Make a Fire: Multitask Learning of Neural Relation and Explanation Classifiers},
  journal  = {Computational Linguistics},
  volume   = {49},
  number   = {1},
  pages    = {117--156},
  month    = mar,
  year     = {2023},
  issn     = {0891-2017},
  doi      = {10.1162/coli_a_00463},
  url      = {https://doi.org/10.1162/coli_a_00463},
  eprint   = {https://direct.mit.edu/coli/article-pdf/49/1/117/2068962/coli_a_00463.pdf},
  abstract = {We propose an explainable approach for relation extraction that mitigates the tension between generalization and explainability by jointly training for the two goals. Our approach uses a multi-task learning architecture, which jointly trains a classifier for relation extraction, and a sequence model that labels words in the context of the relations that explain the decisions of the relation classifier. We also convert the model outputs to rules to bring global explanations to this approach. This sequence model is trained using a hybrid strategy: supervised, when supervision from pre-existing patterns is available, and semi-supervised otherwise. In the latter situation, we treat the sequence model{'}s labels as latent variables, and learn the best assignment that maximizes the performance of the relation classifier. We evaluate the proposed approach on the two datasets and show that the sequence model provides labels that serve as accurate explanations for the relation classifier{'}s decisions, and, importantly, that the joint training generally improves the performance of the relation classifier. We also evaluate the performance of the generated rules and show that the new rules are a great add-on to the manual rules and bring the rule-based system much closer to the neural models.},
}
\n
\n\n\n
\n We propose an explainable approach for relation extraction that mitigates the tension between generalization and explainability by jointly training for the two goals. Our approach uses a multi-task learning architecture, which jointly trains a classifier for relation extraction, and a sequence model that labels words in the context of the relations that explain the decisions of the relation classifier. We also convert the model outputs to rules to bring global explanations to this approach. This sequence model is trained using a hybrid strategy: supervised, when supervision from pre-existing patterns is available, and semi-supervised otherwise. In the latter situation, we treat the sequence model’s labels as latent variables, and learn the best assignment that maximizes the performance of the relation classifier. We evaluate the proposed approach on the two datasets and show that the sequence model provides labels that serve as accurate explanations for the relation classifier’s decisions, and, importantly, that the joint training generally improves the performance of the relation classifier. We also evaluate the performance of the generated rules and show that the new rules are a great add-on to the manual rules and bring the rule-based system much closer to the neural models.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n NEUROSTRUCTURAL DECODING: Neural Text Generation with Structural Constraints.\n \n \n \n \n\n\n \n Mohaddeseh Bastan; Mihai Surdeanu; and Niranjan Balasubramanian.\n\n\n \n\n\n\n In
Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (ACL), 2023. \n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{bastan2023-structural,
  title     = {{NEUROSTRUCTURAL DECODING}: Neural Text Generation with Structural Constraints},
  author    = {Bastan, Mohaddeseh and Surdeanu, Mihai and Balasubramanian, Niranjan},
  booktitle = {Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (ACL)},
  year      = {2023},
  url       = {https://aclanthology.org/2023.acl-long.528.pdf},
}
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Bootstrapping Neural Relation and Explanation Classifiers.\n \n \n \n \n\n\n \n Zheng Tang; and Mihai Surdeanu.\n\n\n \n\n\n\n In
Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (ACL), July 2023. Association for Computational Linguistics\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 9 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{acl2023-bootstrapping-zheng,
  title     = {Bootstrapping Neural Relation and Explanation Classifiers},
  author    = {Tang, Zheng and Surdeanu, Mihai},
  booktitle = {Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (ACL)},
  month     = jul,
  year      = {2023},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.acl-short.5.pdf},
}
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Annotating and Training for Population Subjective Views.\n \n \n \n \n\n\n \n Maria Alexeeva; Caroline Hyland; Keith Alcock; Allegra A. Beal Cohen; Hubert Kanyamahanga; Isaac Kobby Anni; and Mihai Surdeanu.\n\n\n \n\n\n\n In
13th Workshop on Computational Approaches to Subjectivity, Sentiment & Social Media Analysis, 2023. \n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{alexeeva-et-al-2023-annotating,
  title     = {Annotating and Training for Population Subjective Views},
  author    = {Alexeeva, Maria and Hyland, Caroline and Alcock, Keith and Beal Cohen, Allegra A. and Kanyamahanga, Hubert and Anni, Isaac Kobby and Surdeanu, Mihai},
  booktitle = {13th Workshop on Computational Approaches to Subjectivity, Sentiment {\&} Social Media Analysis},
  year      = {2023},
  url       = {http://clulab.org/papers/wassa2023-beliefs.pdf},
}
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Proceedings of the 5th Clinical Natural Language Processing Workshop.\n \n \n \n \n\n\n \n Tristan Naumann; Asma Ben Abacha; Steven Bethard; Kirk Roberts; and Anna Rumshisky.,\n editors.\n \n\n\n \n\n\n\n Association for Computational Linguistics. Toronto, Canada, July 2023.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n\n\n\n
\n
@proceedings{clinicalnlp-2023-clinical,
  title     = {Proceedings of the 5th Clinical Natural Language Processing Workshop},
  editor    = {Naumann, Tristan and Ben Abacha, Asma and Bethard, Steven and Roberts, Kirk and Rumshisky, Anna},
  month     = jul,
  year      = {2023},
  address   = {Toronto, Canada},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.clinicalnlp-1.0},
  keywords  = {health applications},
}
\n
\n\n\n\n
\n\n\n
\n\n\n
\n\n\n \n\n\n
\n
\n\n \n \n \n \n \n \n Gallagher at SemEval-2023 Task 5: Tackling Clickbait with Seq2Seq Models.\n \n \n \n \n\n\n \n Tugay Bilgis; Nimet Beyza Bozdag; and Steven Bethard.\n\n\n \n\n\n\n In
Proceedings of the The 17th International Workshop on Semantic Evaluation (SemEval-2023), pages 1650–1655, Toronto, Canada, July 2023. Association for Computational Linguistics\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{bilgis-etal-2023-gallagher,
  title     = {Gallagher at {SemEval}-2023 Task 5: Tackling Clickbait with {Seq2Seq} Models},
  author    = {Bilgis, Tugay and Bozdag, Nimet Beyza and Bethard, Steven},
  booktitle = {Proceedings of the The 17th International Workshop on Semantic Evaluation (SemEval-2023)},
  month     = jul,
  year      = {2023},
  address   = {Toronto, Canada},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.semeval-1.229},
  pages     = {1650--1655},
  keywords  = {shared task paper, social media},
}
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Textual Entailment for Temporal Dependency Graph Parsing.\n \n \n \n \n\n\n \n Jiarui Yao; Steven Bethard; Kristin Wright-Bettner; Eli Goldner; David Harris; and Guergana Savova.\n\n\n \n\n\n\n In
Proceedings of the 5th Clinical Natural Language Processing Workshop, pages 191–199, Toronto, Canada, July 2023. Association for Computational Linguistics\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{yao-etal-2023-textual,
  title     = {Textual Entailment for Temporal Dependency Graph Parsing},
  author    = {Yao, Jiarui and Bethard, Steven and Wright-Bettner, Kristin and Goldner, Eli and Harris, David and Savova, Guergana},
  booktitle = {Proceedings of the 5th Clinical Natural Language Processing Workshop},
  month     = jul,
  year      = {2023},
  address   = {Toronto, Canada},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.clinicalnlp-1.25},
  pages     = {191--199},
  keywords  = {workshop paper, timelines, information extraction, health applications},
}
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n clulab at MEDIQA-Chat 2023: Summarization and classification of medical dialogues.\n \n \n \n \n\n\n \n Kadir Bulut Ozler; and Steven Bethard.\n\n\n \n\n\n\n In
Proceedings of the 5th Clinical Natural Language Processing Workshop, pages 144–149, Toronto, Canada, July 2023. Association for Computational Linguistics\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{ozler-bethard-2023-clulab,
  title     = {clulab at {MEDIQA}-Chat 2023: Summarization and classification of medical dialogues},
  author    = {Ozler, Kadir Bulut and Bethard, Steven},
  booktitle = {Proceedings of the 5th Clinical Natural Language Processing Workshop},
  month     = jul,
  year      = {2023},
  address   = {Toronto, Canada},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.clinicalnlp-1.19},
  pages     = {144--149},
  keywords  = {shared task paper, health applications},
}
\n
\n\n\n\n
\n\n\n \n\n\n
\n
\n\n \n \n \n \n \n \n Two-Stage Fine-Tuning for Improved Bias and Variance for Large Pretrained Language Models.\n \n \n \n \n\n\n \n Lijing Wang; Yingya Li; Timothy Miller; Steven Bethard; and Guergana Savova.\n\n\n \n\n\n\n In
Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 15746–15761, Toronto, Canada, July 2023. Association for Computational Linguistics\n
[Acceptance rate 23%]\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n\n\n\n
\n
@inproceedings{wang-etal-2023-two,
  title     = {Two-Stage Fine-Tuning for Improved Bias and Variance for Large Pretrained Language Models},
  author    = {Wang, Lijing and Li, Yingya and Miller, Timothy and Bethard, Steven and Savova, Guergana},
  booktitle = {Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2023},
  address   = {Toronto, Canada},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.acl-long.877},
  pages     = {15746--15761},
  note      = {[Acceptance rate 23\%]},
  keywords  = {machine learning},
}
\n
\n\n\n\n
\n\n\n \n\n\n
\n
\n\n \n \n \n \n \n \n Engagement with incivility in tweets from and directed at local elected officials.\n \n \n \n \n\n\n \n Stephen A. Rains; Kate Kenski; Leah Dajches; Kaylin Duncan; Kun Yan; Yejin Shin; Jules L. Barbati; Steven Bethard; Kevin Coe; and Yotam Shmargad.\n\n\n \n\n\n\n
Communication and Democracy, 57(1): 143-152. 2023.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n\n\n\n
\n
@article{doi:10.1080/27671127.2023.2195467,
  author    = {Rains, Stephen A. and Kenski, Kate and Dajches, Leah and Duncan, Kaylin and Yan, Kun and Shin, Yejin and Barbati, Jules L. and Bethard, Steven and Coe, Kevin and Shmargad, Yotam},
  title     = {Engagement with incivility in tweets from and directed at local elected officials},
  journal   = {Communication and Democracy},
  volume    = {57},
  number    = {1},
  pages     = {143--152},
  year      = {2023},
  publisher = {Routledge},
  doi       = {10.1080/27671127.2023.2195467},
  url       = {https://doi.org/10.1080/27671127.2023.2195467},
  keywords  = {social media},
}
\n
\n\n\n\n
\n\n\n\n\n\n