Multi-label Few-shot ICD Coding as Autoregressive Generation with Prompt. Yang, Z., Kwon, S., Yao, Z., & Yu, H. Proceedings of the AAAI Conference on Artificial Intelligence, 37(4):5366–5374, June, 2023. Automatic International Classification of Diseases (ICD) coding aims to assign multiple ICD codes to a medical note with an average of 3,000+ tokens. This task is challenging due to the high-dimensional space of multi-label assignment (155,000+ ICD code candidates) and the long-tail challenge: many ICD codes are infrequently assigned, yet infrequent ICD codes are clinically important. This study addresses the long-tail challenge by transforming this multi-label classification task into an autoregressive generation task. Specifically, we first introduce a novel pretraining objective to generate free-text diagnoses and procedures using the SOAP structure, the medical logic physicians use for note documentation. Second, instead of directly predicting in the high-dimensional space of ICD codes, our model generates lower-dimensional text descriptions, from which ICD codes are then inferred. Third, we design a novel prompt template for multi-label classification. We evaluate our Generation with Prompt (GPsoap) model on the all-code assignment benchmark (MIMIC-III-full) and the few-shot ICD code assignment benchmark (MIMIC-III-few). Experiments on MIMIC-III-few show that our model achieves a macro F1 of 30.2, substantially outperforming the previous MIMIC-III-full SOTA model (macro F1 4.3) and the model specifically designed for the few/zero-shot setting (macro F1 18.7). Finally, we design a novel ensemble learner, a cross-attention reranker with prompts, to integrate the previous SOTA and our best few-shot coding predictions. Experiments on MIMIC-III-full show that our ensemble learner substantially improves both macro and micro F1, from 10.4 to 14.6 and from 58.2 to 59.1, respectively.
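The abstract's key step, generating free-text diagnosis/procedure descriptions and then inferring ICD codes from them, can be illustrated with a minimal sketch. The paper does not spell out the mapping step here; the toy ICD-9 lookup table, the difflib-based string similarity, and the 0.8 threshold below are illustrative assumptions, not the authors' implementation.

```python
# Minimal sketch (not the authors' exact pipeline): map generated free-text
# descriptions onto ICD codes by matching them against a code-description
# lookup. The table, similarity measure, and threshold are assumptions.
from difflib import SequenceMatcher

# Hypothetical subset of an ICD-9 code -> description lookup; the real
# vocabulary has tens of thousands of entries.
ICD_DESCRIPTIONS = {
    "401.9": "unspecified essential hypertension",
    "250.00": "diabetes mellitus without mention of complication",
    "428.0": "congestive heart failure unspecified",
}

def descriptions_to_codes(generated, threshold=0.8):
    """Map generated free-text descriptions to ICD codes by fuzzy string match."""
    codes = set()
    for text in generated:
        best_code, best_score = None, 0.0
        for code, desc in ICD_DESCRIPTIONS.items():
            score = SequenceMatcher(None, text.lower(), desc).ratio()
            if score > best_score:
                best_code, best_score = code, score
        if best_code is not None and best_score >= threshold:
            codes.add(best_code)
    return codes

if __name__ == "__main__":
    generated = [
        "congestive heart failure, unspecified",
        "unspecified essential hypertension",
    ]
    print(descriptions_to_codes(generated))  # {'428.0', '401.9'} (set order varies)
```

In practice one would match against the full ICD description vocabulary, and an embedding-based similarity would tolerate paraphrases better than character-level matching; the point of the sketch is only that generation targets a low-dimensional text space and codes are recovered afterwards.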
@article{yang_multi-label_2023,
title = {Multi-label {Few}-shot {ICD} {Coding} as {Autoregressive} {Generation} with {Prompt}},
volume = {37},
issn = {2159-5399},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10457101/},
doi = {10.1609/aaai.v37i4.25668},
	abstract = {Automatic International Classification of Diseases (ICD) coding aims to assign multiple ICD codes to a medical note with an average of 3,000+ tokens. This task is challenging due to the high-dimensional space of multi-label assignment (155,000+ ICD code candidates) and the long-tail challenge: many ICD codes are infrequently assigned, yet infrequent ICD codes are clinically important. This study addresses the long-tail challenge by transforming this multi-label classification task into an autoregressive generation task. Specifically, we first introduce a novel pretraining objective to generate free-text diagnoses and procedures using the SOAP structure, the medical logic physicians use for note documentation. Second, instead of directly predicting in the high-dimensional space of ICD codes, our model generates lower-dimensional text descriptions, from which ICD codes are then inferred. Third, we design a novel prompt template for multi-label classification. We evaluate our Generation with Prompt (GPsoap) model on the all-code assignment benchmark (MIMIC-III-full) and the few-shot ICD code assignment benchmark (MIMIC-III-few). Experiments on MIMIC-III-few show that our model achieves a macro F1 of 30.2, substantially outperforming the previous MIMIC-III-full SOTA model (macro F1 4.3) and the model specifically designed for the few/zero-shot setting (macro F1 18.7). Finally, we design a novel ensemble learner, a cross-attention reranker with prompts, to integrate the previous SOTA and our best few-shot coding predictions. Experiments on MIMIC-III-full show that our ensemble learner substantially improves both macro and micro F1, from 10.4 to 14.6 and from 58.2 to 59.1, respectively.},
number = {4},
urldate = {2024-04-10},
	journal = {Proceedings of the AAAI Conference on Artificial Intelligence},
author = {Yang, Zhichao and Kwon, Sunjae and Yao, Zonghai and Yu, Hong},
month = jun,
year = {2023},
pmid = {37635946},
pmcid = {PMC10457101},
keywords = {Computer Science - Artificial Intelligence, Computer Science - Computation and Language},
pages = {5366--5374},
}
{"_id":"cY258tej4xXH7cbyu","bibbaseid":"yang-kwon-yao-yu-multilabelfewshoticdcodingasautoregressivegenerationwithprompt-2023","author_short":["Yang, Z.","Kwon, S.","Yao, Z.","Yu, H."],"bibdata":{"bibtype":"article","type":"article","title":"Multi-label Few-shot ICD Coding as Autoregressive Generation with Prompt","volume":"37","issn":"2159-5399","url":"https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10457101/","doi":"10.1609/aaai.v37i4.25668","abstract":"Automatic International Classification of Diseases (ICD) coding aims to assign multiple ICD codes to a medical note with an average of 3,000+ tokens. This task is challenging due to the high-dimensional space of multi-label assignment (155,000+ ICD code candidates) and the long-tail challenge - Many ICD codes are infrequently assigned yet infrequent ICD codes are important clinically. This study addresses the long-tail challenge by transforming this multi-label classification task into an autoregressive generation task. Specifically, we first introduce a novel pretraining objective to generate free text diagnoses and procedures using the SOAP structure, the medical logic physicians use for note documentation. Second, instead of directly predicting the high dimensional space of ICD codes, our model generates the lower dimension of text descriptions, which then infers ICD codes. Third, we designed a novel prompt template for multi-label classification. We evaluate our Generation with Prompt (GPsoap) model with the benchmark of all code assignment (MIMIC-III-full) and few shot ICD code assignment evaluation benchmark (MIMIC-III-few). Experiments on MIMIC-III-few show that our model performs with a marco F130.2, which substantially outperforms the previous MIMIC-III-full SOTA model (marco F1 4.3) and the model specifically designed for few/zero shot setting (marco F1 18.7). Finally, we design a novel ensemble learner, a cross-attention reranker with prompts, to integrate previous SOTA and our best few-shot coding predictions. Experiments on MIMIC-III-full show that our ensemble learner substantially improves both macro and micro F1, from 10.4 to 14.6 and from 58.2 to 59.1, respectively.","number":"4","urldate":"2024-04-10","journal":"Proceedings of the ... AAAI Conference on Artificial Intelligence. AAAI Conference on Artificial Intelligence","author":[{"propositions":[],"lastnames":["Yang"],"firstnames":["Zhichao"],"suffixes":[]},{"propositions":[],"lastnames":["Kwon"],"firstnames":["Sunjae"],"suffixes":[]},{"propositions":[],"lastnames":["Yao"],"firstnames":["Zonghai"],"suffixes":[]},{"propositions":[],"lastnames":["Yu"],"firstnames":["Hong"],"suffixes":[]}],"month":"June","year":"2023","pmid":"37635946","pmcid":"PMC10457101","keywords":"Computer Science - Artificial Intelligence, Computer Science - Computation and Language","pages":"5366–5374","bibtex":"@article{yang_multi-label_2023,\n\ttitle = {Multi-label {Few}-shot {ICD} {Coding} as {Autoregressive} {Generation} with {Prompt}},\n\tvolume = {37},\n\tissn = {2159-5399},\n\turl = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10457101/},\n\tdoi = {10.1609/aaai.v37i4.25668},\n\tabstract = {Automatic International Classification of Diseases (ICD) coding aims to assign multiple ICD codes to a medical note with an average of 3,000+ tokens. This task is challenging due to the high-dimensional space of multi-label assignment (155,000+ ICD code candidates) and the long-tail challenge - Many ICD codes are infrequently assigned yet infrequent ICD codes are important clinically. 
This study addresses the long-tail challenge by transforming this multi-label classification task into an autoregressive generation task. Specifically, we first introduce a novel pretraining objective to generate free text diagnoses and procedures using the SOAP structure, the medical logic physicians use for note documentation. Second, instead of directly predicting the high dimensional space of ICD codes, our model generates the lower dimension of text descriptions, which then infers ICD codes. Third, we designed a novel prompt template for multi-label classification. We evaluate our Generation with Prompt (GPsoap) model with the benchmark of all code assignment (MIMIC-III-full) and few shot ICD code assignment evaluation benchmark (MIMIC-III-few). Experiments on MIMIC-III-few show that our model performs with a marco F130.2, which substantially outperforms the previous MIMIC-III-full SOTA model (marco F1 4.3) and the model specifically designed for few/zero shot setting (marco F1 18.7). Finally, we design a novel ensemble learner, a cross-attention reranker with prompts, to integrate previous SOTA and our best few-shot coding predictions. Experiments on MIMIC-III-full show that our ensemble learner substantially improves both macro and micro F1, from 10.4 to 14.6 and from 58.2 to 59.1, respectively.},\n\tnumber = {4},\n\turldate = {2024-04-10},\n\tjournal = {Proceedings of the ... AAAI Conference on Artificial Intelligence. AAAI Conference on Artificial Intelligence},\n\tauthor = {Yang, Zhichao and Kwon, Sunjae and Yao, Zonghai and Yu, Hong},\n\tmonth = jun,\n\tyear = {2023},\n\tpmid = {37635946},\n\tpmcid = {PMC10457101},\n\tkeywords = {Computer Science - Artificial Intelligence, Computer Science - Computation and Language},\n\tpages = {5366--5374},\n}\n\n","author_short":["Yang, Z.","Kwon, S.","Yao, Z.","Yu, H."],"key":"yang_multi-label_2023","id":"yang_multi-label_2023","bibbaseid":"yang-kwon-yao-yu-multilabelfewshoticdcodingasautoregressivegenerationwithprompt-2023","role":"author","urls":{"Paper":"https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10457101/"},"keyword":["Computer Science - Artificial Intelligence","Computer Science - Computation and Language"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"article","biburl":"http://fenway.cs.uml.edu/papers/pubs-all.bib","dataSources":["TqaA9miSB65nRfS5H"],"keywords":["computer science - artificial intelligence","computer science - computation and language"],"search_terms":["multi","label","few","shot","icd","coding","autoregressive","generation","prompt","yang","kwon","yao","yu"],"title":"Multi-label Few-shot ICD Coding as Autoregressive Generation with Prompt","year":2023}