H4H: A Comprehensive Repository of Housing Resources for Homelessness. Osebe, S., Tsai, J., & Hong, Y. AMIA Summits on Translational Science Proceedings, 2023:427–437, June 2023.
More than half a million people were experiencing homelessness in America on any given night in 2021, yet only around 50% of them used shelters. To address unmet needs in homelessness, we report the creation of Housing for Homeless (H4H), the largest comprehensive repository of emergency shelters and other housing resources, from which we deployed state-of-the-art natural language processing approaches to extract information vital to individuals experiencing homelessness, including the admission process, services provided, duration of stay, and eligibility. We frame information extraction as a question-answering task. Using 2,055 question-answer pairs for training and evaluation, the best-performing system was a two-step classification and question-answering RoBERTa model with prompting, achieving a macro-average F1 score of 75.83. H4H and the annotated entries are publicly available as a benchmark dataset.
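
The QA-style extraction setup described in the abstract can be illustrated with the minimal sketch below, assuming a Hugging Face transformers extractive question-answering pipeline. The checkpoint deepset/roberta-base-squad2 and the shelter description are illustrative placeholders, not the fine-tuned H4H model or data from the paper.

# Minimal sketch: framing shelter-information extraction as extractive QA.
# Assumes the Hugging Face `transformers` question-answering pipeline; the
# checkpoint and passage below are illustrative, not the paper's model/data.
from transformers import pipeline

qa = pipeline("question-answering", model="deepset/roberta-base-squad2")

# Hypothetical shelter description standing in for an H4H repository entry.
context = (
    "Riverside Emergency Shelter admits adults 18 and older on a walk-in "
    "basis between 4 pm and 9 pm. Guests may stay up to 30 days and receive "
    "meals, showers, and case management services."
)

# Questions mirroring the information types targeted in the paper:
# admission process, services provided, duration of stay, and eligibility.
for question in [
    "What is the admission process?",
    "What services are provided?",
    "What is the duration of stay?",
    "Who is eligible?",
]:
    answer = qa(question=question, context=context)
    print(f"{question} -> {answer['answer']} (score={answer['score']:.2f})")

In the paper itself this is a two-step setup: a classifier first decides whether a shelter description answers a given question at all, and an extractive QA model with prompting then selects the answer span; the sketch above only shows the second, span-extraction step.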
@article{osebe_h4h_2023,
	title = {{H4H}: {A} {Comprehensive} {Repository} of {Housing} {Resources} for {Homelessness}},
	volume = {2023},
	issn = {2153-4063},
	shorttitle = {{H4H}},
	url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10283121/},
	abstract = {More than half a million people were experiencing homelessness in America on any given night in 2021, yet only around 50\% of them used shelters. To address unmet needs in homelessness, we report the creation of housing for homeless (H4H), the largest comprehensive repository of emergency shelters and other housing resources, from which we deployed state-of-the-art natural language processing approaches to extract information vital to individuals experiencing homelessness, including admission process, service provided, duration of stay, and eligibility. We frame information extraction as a question-answer task. Using 2,055 question-answer pairs for training and evaluation, the best performing system was a two-step classification and question-answering Roberta model with prompting, achieving a macro-average of 75.83 for F1 score. H4H and the annotated entries are publicly available as a benchmark dataset.},
	urldate = {2024-04-10},
	journal = {AMIA Summits on Translational Science Proceedings},
	author = {Osebe, Samuel and Tsai, Jack and Hong, Yu},
	month = jun,
	year = {2023},
	pmid = {37350907},
	pmcid = {PMC10283121},
	pages = {427--437},
}
