Humans Hallucinate Too: Language Models Identify and Correct Subjective Annotation Errors With Label-in-a-Haystack Prompts. Chochlakis, G., Wu, P., Bedi, A., Ma, M., Lerman, K., & Narayanan, S. CoRR, 2025.
Humans Hallucinate Too: Language Models Identify and Correct Subjective Annotation Errors With Label-in-a-Haystack Prompts — [Paper] [DOI] [BibTeX]
@article{DBLP:journals/corr/abs-2505-17222,
  author       = {Georgios Chochlakis and
                  Peter Wu and
                  Arjun Bedi and
                  Marcus Ma and
                  Kristina Lerman and
                  Shrikanth Narayanan},
  title        = {Humans Hallucinate Too: Language Models Identify and Correct Subjective
                  Annotation Errors With {Label-in-a-Haystack} Prompts},
  journal      = {CoRR},
  volume       = {abs/2505.17222},
  year         = {2025},
  url          = {https://doi.org/10.48550/arXiv.2505.17222},
  doi          = {10.48550/arXiv.2505.17222},
  eprinttype   = {arXiv},
  eprint       = {2505.17222},
  timestamp    = {Thu, 26 Jun 2025 01:00:00 +0200},
  biburl       = {https://dblp.org/rec/journals/corr/abs-2505-17222.bib},
  bibsource    = {dblp computer science bibliography, https://dblp.org}
}

Downloads: 0