Learning to Extract Symbolic Knowledge from the World Wide Web. Craven, M., DiPasquo, D., Freitag, D., McCallum, A., Mitchell, T., Nigam, K., & Slattery, S. In World Wide Web Internet And Web Information Systems, of AAAI '98/IAAI '98, pages 509-516, 1998. Citeseer.
Learning to Extract Symbolic Knowledge from the World Wide Web — [pdf] Paper · [link] Website · abstract · bibtex
The World Wide Web is a vast source of information accessible to computers, but understandable only to humans. The goal of the research described here is to automatically create a computer understandable world wide knowledge base whose content mirrors that of the World Wide Web. Such a knowledge base would enable much more effective retrieval of Web information, and promote new uses of the Web to support knowledge-based inference and problem solving. Our approach is to develop a trainable information extraction system that takes two inputs: an ontology defining the classes and relations of interest, and a set of training data consisting of labeled regions of hypertext representing instances of these classes and relations. Given these inputs, the system learns to extract information from other pages and hyperlinks on the Web. This paper describes our general approach, several machine learning algorithms for this task, and promising initial results with a prototype system.
@comment{Mendeley/BibBase export, cleaned: citation key restored from the
  citation_key field, corrupted "identifiers" field ([object Object]) dropped,
  page range uses an en-dash (--), author initials carry periods, and the
  nonstandard "websites" field is renamed to the standard "url". The remaining
  Mendeley bookkeeping fields (id, created, profile_id, group_id, ...) are
  unknown to BibTeX and silently ignored, so they are retained as-is.}
@inproceedings{Craven1998,
 title = {Learning to Extract Symbolic Knowledge from the {World Wide Web}},
 type = {inProceedings},
 year = {1998},
 pages = {509--516},
 url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.57.4309&rep=rep1&type=pdf},
 publisher = {Citeseer},
 institution = {DARPA HPKB program},
 series = {AAAI '98/IAAI '98},
 id = {d0bdd182-8640-3a7a-ab06-639f9064f845},
 created = {2012-02-09T21:39:35.000Z},
 file_attached = {true},
 profile_id = {5284e6aa-156c-3ce5-bc0e-b80cf09f3ef6},
 group_id = {066b42c8-f712-3fc3-abb2-225c158d2704},
 last_modified = {2017-03-14T14:36:19.698Z},
 read = {false},
 starred = {false},
 authored = {false},
 confirmed = {true},
 hidden = {false},
 citation_key = {Craven1998},
 private_publication = {false},
 abstract = {The World Wide Web is a vast source of information accessible to computers, but understandable only to humans. The goal of the research described here is to automatically create a computer understandable world wide knowledge base whose content mirrors that of the World Wide Web. Such a knowledge base would enable much more effective retrieval of Web information, and promote new uses of the Web to support knowledge-based inference and problem solving. Our approach is to develop a trainable information extraction system that takes two inputs: an ontology defining the classes and relations of interest, and a set of training data consisting of labeled regions of hypertext representing instances of these classes and relations. Given these inputs, the system learns to extract information from other pages and hyperlinks on the Web. This paper describes our general approach, several machine learning algorithms for this task, and promising initial results with a prototype system.},
 bibtype = {inProceedings},
 author = {Craven, M. and DiPasquo, D. and Freitag, D. and McCallum, A. and Mitchell, T. and Nigam, K. and Slattery, S.},
 booktitle = {World Wide Web Internet And Web Information Systems}
}
Downloads: 0