Learning to Rank Figures within a Biomedical Article. Liu, F. & Yu, H. PLoS ONE, 9(3):e61567, March 2014. doi:10.1371/journal.pone.0061567

Abstract: Hundreds of millions of figures are available in the biomedical literature, representing important experimental evidence. This ever-increasing volume has made it difficult for scientists to access figures of interest effectively and accurately, a process that is crucial for validating research facts and for formulating or testing novel research hypotheses. Current figure-search applications cannot fully meet this challenge because their "bag of figures" assumption does not take the relationships among figures into account. In our previous study, hundreds of biomedical researchers annotated articles for which they served as corresponding authors, ranking each figure in their paper by its importance at their discretion, a task referred to as "figure ranking". Using this collection of annotated data, we investigated computational approaches to ranking figures automatically. We exploited and extended state-of-the-art listwise learning-to-rank algorithms and developed a new supervised-learning model, BioFigRank. Cross-validation results show that BioFigRank yielded the best performance among state-of-the-art computational models, and that greedy feature selection further boosted ranking performance significantly. We also evaluated BioFigRank against human experts at three levels of domain expertise: (1) the First Author; (2) a Non-Author In-Domain Expert, who is neither the author nor a co-author of an article but works in the same field as its corresponding author; and (3) a Non-Author Out-Domain Expert, who is neither the author nor a co-author of an article and may or may not work in the same field as its corresponding author. Our results show that BioFigRank outperforms the Non-Author Out-Domain Expert and performs as well as the Non-Author In-Domain Expert. Although BioFigRank underperforms the First Author, most biomedical researchers are in- or out-domain experts for any given article, so we conclude that BioFigRank is an artificial-intelligence system offering expert-level assistance that helps biomedical researchers navigate the ever-growing literature efficiently.
@article{liu_learning_2014,
title = {Learning to {Rank} {Figures} within a {Biomedical} {Article}},
volume = {9},
issn = {1932-6203},
url = {http://dx.plos.org/10.1371/journal.pone.0061567},
doi = {10.1371/journal.pone.0061567},
abstract = {Hundreds of millions of figures are available in biomedical literature, representing important biomedical experimental evidence. This ever-increasing sheer volume has made it difficult for scientists to effectively and accurately access figures of their interest, the process of which is crucial for validating research facts and for formulating or testing novel research hypotheses. Current figure search applications can't fully meet this challenge as the "bag of figures" assumption doesn't take into account the relationship among figures. In our previous study, hundreds of biomedical researchers have annotated articles in which they serve as corresponding authors. They ranked each figure in their paper based on a figure's importance at their discretion, referred to as "figure ranking". Using this collection of annotated data, we investigated computational approaches to automatically rank figures. We exploited and extended the state-of-the-art listwise learning-to-rank algorithms and developed a new supervised-learning model BioFigRank. The cross-validation results show that BioFigRank yielded the best performance compared with other state-of-the-art computational models, and the greedy feature selection can further boost the ranking performance significantly. Furthermore, we carry out the evaluation by comparing BioFigRank with three-level competitive domain-specific human experts: (1) First Author, (2) Non-Author-In-Domain-Expert who is not the author nor co-author of an article but who works in the same field of the corresponding author of the article, and (3) Non-Author-Out-Domain-Expert who is not the author nor co-author of an article and who may or may not work in the same field of the corresponding author of an article. Our results show that BioFigRank outperforms Non-Author-Out-Domain-Expert and performs as well as Non-Author-In-Domain-Expert. Although BioFigRank underperforms First Author, since most biomedical researchers are either in- or out-domain-experts for an article, we conclude that BioFigRank represents an artificial intelligence system that offers expert-level intelligence to help biomedical researchers to navigate increasingly proliferated big data efficiently.},
language = {en},
number = {3},
urldate = {2015-02-26},
journal = {PLoS ONE},
author = {Liu, Feifan and Yu, Hong},
editor = {Preis, Tobias},
month = mar,
year = {2014},
	pmid = {24625719},
	pmcid = {PMC3953065},
pages = {e61567},
}
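
The abstract above names two concrete techniques: a listwise learning-to-rank objective and greedy feature selection. The sketch below is not the authors' BioFigRank implementation, whose features and ranker are described in the paper itself; it is a minimal illustration of the general techniques, assuming a ListNet-style top-one cross-entropy as the listwise loss, NDCG@k as the selection criterion, a simple least-squares linear scorer, and invented figure features and labels throughout.

    import numpy as np

    def listnet_top_one_loss(scores, relevance):
        # Cross-entropy between the top-one probability distributions induced
        # by the true relevance labels and by the model scores (ListNet-style).
        p_true = np.exp(relevance - relevance.max())
        p_true /= p_true.sum()
        p_pred = np.exp(scores - scores.max())
        p_pred /= p_pred.sum()
        return float(-(p_true * np.log(p_pred + 1e-12)).sum())

    def ndcg_at_k(scores, relevance, k=3):
        # NDCG@k for one ranked list of figures.
        order = np.argsort(-scores)[:k]
        discounts = 1.0 / np.log2(np.arange(2, len(order) + 2))
        dcg = ((2.0 ** relevance[order] - 1) * discounts).sum()
        ideal = np.sort(relevance)[::-1][:k]
        idcg = ((2.0 ** ideal - 1) * discounts[:len(ideal)]).sum()
        return float(dcg / idcg) if idcg > 0 else 0.0

    def greedy_feature_selection(X, y, n_features, k=3):
        # Forward selection: repeatedly add the feature column whose inclusion
        # most improves NDCG@k of a least-squares linear scorer; stop when no
        # remaining candidate improves the score.
        selected, best = [], -np.inf
        while True:
            gains = []
            for f in range(n_features):
                if f in selected:
                    continue
                cols = selected + [f]
                w, *_ = np.linalg.lstsq(X[:, cols], y, rcond=None)
                gains.append((ndcg_at_k(X[:, cols] @ w, y, k), f))
            if not gains:
                break
            score, f = max(gains)
            if score <= best:
                break
            best, selected = score, selected + [f]
        return selected, best

    # Toy example: six figures from one hypothetical article, four features,
    # expert importance labels on a 0-3 scale (all values invented).
    rng = np.random.default_rng(0)
    y = np.array([3.0, 2.0, 2.0, 1.0, 0.0, 0.0])
    X = rng.normal(size=(6, 4))
    X[:, 0] += y  # make feature 0 informative on purpose
    sel, score = greedy_feature_selection(X, y, n_features=4)
    print("selected features:", sel, "NDCG@3:", round(score, 3))
    print("listwise loss:", round(listnet_top_one_loss(X[:, sel] @ np.ones(len(sel)), y), 3))

Run as-is, the greedy loop keeps only the feature columns that actually improve NDCG on the toy data. In a faithful reproduction, each candidate feature set would be evaluated with cross-validation, as the paper does, rather than on the same data used to fit the scorer.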
{"_id":"AXqsfEaRQtgxFmdKr","bibbaseid":"liu-yu-learningtorankfigureswithinabiomedicalarticle-2014","author_short":["Liu, F.","Yu, H."],"bibdata":{"bibtype":"article","type":"article","title":"Learning to Rank Figures within a Biomedical Article","volume":"9","issn":"1932-6203","url":"http://dx.plos.org/10.1371/journal.pone.0061567","doi":"10.1371/journal.pone.0061567","abstract":"Hundreds of millions of figures are available in biomedical literature, representing important biomedical experimental evidence. This ever-increasing sheer volume has made it difficult for scientists to effectively and accurately access figures of their interest, the process of which is crucial for validating research facts and for formulating or testing novel research hypotheses. Current figure search applications can't fully meet this challenge as the \"bag of figures\" assumption doesn't take into account the relationship among figures. In our previous study, hundreds of biomedical researchers have annotated articles in which they serve as corresponding authors. They ranked each figure in their paper based on a figure's importance at their discretion, referred to as \"figure ranking\". Using this collection of annotated data, we investigated computational approaches to automatically rank figures. We exploited and extended the state-of-the-art listwise learning-to-rank algorithms and developed a new supervised-learning model BioFigRank. The cross-validation results show that BioFigRank yielded the best performance compared with other state-of-the-art computational models, and the greedy feature selection can further boost the ranking performance significantly. Furthermore, we carry out the evaluation by comparing BioFigRank with three-level competitive domain-specific human experts: (1) First Author, (2) Non-Author-In-Domain-Expert who is not the author nor co-author of an article but who works in the same field of the corresponding author of the article, and (3) Non-Author-Out-Domain-Expert who is not the author nor co-author of an article and who may or may not work in the same field of the corresponding author of an article. Our results show that BioFigRank outperforms Non-Author-Out-Domain-Expert and performs as well as Non-Author-In-Domain-Expert. Although BioFigRank underperforms First Author, since most biomedical researchers are either in- or out-domain-experts for an article, we conclude that BioFigRank represents an artificial intelligence system that offers expert-level intelligence to help biomedical researchers to navigate increasingly proliferated big data efficiently.","language":"en","number":"3","urldate":"2015-02-26","journal":"PLoS ONE","author":[{"propositions":[],"lastnames":["Liu"],"firstnames":["Feifan"],"suffixes":[]},{"propositions":[],"lastnames":["Yu"],"firstnames":["Hong"],"suffixes":[]}],"editor":[{"propositions":[],"lastnames":["Preis"],"firstnames":["Tobias"],"suffixes":[]}],"month":"March","year":"2014","pmid":"24625719 PMCID: PMC3953065","pages":"e61567","bibtex":"@article{liu_learning_2014,\n\ttitle = {Learning to {Rank} {Figures} within a {Biomedical} {Article}},\n\tvolume = {9},\n\tissn = {1932-6203},\n\turl = {http://dx.plos.org/10.1371/journal.pone.0061567},\n\tdoi = {10.1371/journal.pone.0061567},\n\tabstract = {Hundreds of millions of figures are available in biomedical literature, representing important biomedical experimental evidence. 
This ever-increasing sheer volume has made it difficult for scientists to effectively and accurately access figures of their interest, the process of which is crucial for validating research facts and for formulating or testing novel research hypotheses. Current figure search applications can't fully meet this challenge as the \"bag of figures\" assumption doesn't take into account the relationship among figures. In our previous study, hundreds of biomedical researchers have annotated articles in which they serve as corresponding authors. They ranked each figure in their paper based on a figure's importance at their discretion, referred to as \"figure ranking\". Using this collection of annotated data, we investigated computational approaches to automatically rank figures. We exploited and extended the state-of-the-art listwise learning-to-rank algorithms and developed a new supervised-learning model BioFigRank. The cross-validation results show that BioFigRank yielded the best performance compared with other state-of-the-art computational models, and the greedy feature selection can further boost the ranking performance significantly. Furthermore, we carry out the evaluation by comparing BioFigRank with three-level competitive domain-specific human experts: (1) First Author, (2) Non-Author-In-Domain-Expert who is not the author nor co-author of an article but who works in the same field of the corresponding author of the article, and (3) Non-Author-Out-Domain-Expert who is not the author nor co-author of an article and who may or may not work in the same field of the corresponding author of an article. Our results show that BioFigRank outperforms Non-Author-Out-Domain-Expert and performs as well as Non-Author-In-Domain-Expert. Although BioFigRank underperforms First Author, since most biomedical researchers are either in- or out-domain-experts for an article, we conclude that BioFigRank represents an artificial intelligence system that offers expert-level intelligence to help biomedical researchers to navigate increasingly proliferated big data efficiently.},\n\tlanguage = {en},\n\tnumber = {3},\n\turldate = {2015-02-26},\n\tjournal = {PLoS ONE},\n\tauthor = {Liu, Feifan and Yu, Hong},\n\teditor = {Preis, Tobias},\n\tmonth = mar,\n\tyear = {2014},\n\tpmid = {24625719 PMCID: PMC3953065},\n\tpages = {e61567},\n}\n\n","author_short":["Liu, F.","Yu, H."],"editor_short":["Preis, T."],"key":"liu_learning_2014","id":"liu_learning_2014","bibbaseid":"liu-yu-learningtorankfigureswithinabiomedicalarticle-2014","role":"author","urls":{"Paper":"http://dx.plos.org/10.1371/journal.pone.0061567"},"metadata":{"authorlinks":{}},"html":""},"bibtype":"article","biburl":"http://fenway.cs.uml.edu/papers/pubs-all.bib","dataSources":["TqaA9miSB65nRfS5H"],"keywords":[],"search_terms":["learning","rank","figures","within","biomedical","article","liu","yu"],"title":"Learning to Rank Figures within a Biomedical Article","year":2014}