How to grow a mind: statistics, structure, and abstraction. Tenenbaum, J. B., Kemp, C., Griffiths, T. L., & Goodman, N. D. Science, 331(6022):1279–1285, 2011. doi: 10.1126/science.1192788
Abstract: In coming to understand the world – in learning concepts, acquiring language, and grasping causal relations – our minds make inferences that appear to go far beyond the data available. How do we do it? This review describes recent approaches to reverse-engineering human learning and cognitive development and, in parallel, engineering more humanlike machine learning systems. Computational models that perform probabilistic inference over hierarchies of flexibly structured representations can address some of the deepest questions about the nature and origins of human thought: How does abstract knowledge guide learning and reasoning from sparse data? What forms does our knowledge take, across different domains and tasks? And how is that abstract knowledge itself acquired?
@Article{Tenenbaum2011,
author = {Tenenbaum, Joshua B. and Kemp, Charles and Griffiths, Thomas L. and Goodman, Noah D.},
journal = {Science},
title = {How to grow a mind: statistics, structure, and abstraction},
year = {2011},
number = {6022},
pages = {1279--1285},
volume = {331},
abstract = {In coming to understand the world – in learning concepts, acquiring
language, and grasping causal relations – our minds make inferences
that appear to go far beyond the data available. How do we do it?
This review describes recent approaches to reverse-engineering human
learning and cognitive development and, in parallel, engineering
more humanlike machine learning systems. Computational models that
perform probabilistic inference over hierarchies of flexibly structured
representations can address some of the deepest questions about the
nature and origins of human thought: How does abstract knowledge
guide learning and reasoning from sparse data? What forms does our
knowledge take, across different domains and tasks? And how is that
abstract knowledge itself acquired?},
doi = {10.1126/science.1192788},
institution = {Department of Brain and Cognitive Sciences, Massachusetts Institute of Technology, Cambridge, MA 02139, USA. jbt@mit.edu},
keywords = {Artificial Intelligence; Bayes Theorem; Cognition; Concept Formation; Humans; Knowledge; Learning; Models, Statistical; Theory of Mind; Thinking},
language = {eng},
medline-pst = {ppublish},
pmid = {21393536},
timestamp = {2013.03.21},
}
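
The abstract's central question – how abstract knowledge guides learning from sparse data – is, computationally, an application of Bayes' rule: the posterior over candidate concepts, P(h | D), is proportional to P(D | h) P(h). Below is a minimal Python sketch of that computation over a toy "number game" hypothesis space; the domain, the four candidate concepts, and the uniform prior are illustrative assumptions, not details taken from the paper.

# Toy Bayesian concept learning over a small hypothesis space,
# in the spirit of the probabilistic models this review surveys.
# The "number game" domain, the four hypotheses, and the uniform
# prior are illustrative assumptions, not details from the paper.

def make_hypotheses(limit=100):
    """Candidate concepts, each given by its extension over 1..limit."""
    return {
        "even numbers":    {n for n in range(1, limit + 1) if n % 2 == 0},
        "odd numbers":     {n for n in range(1, limit + 1) if n % 2 == 1},
        "powers of two":   {n for n in range(1, limit + 1) if (n & (n - 1)) == 0},
        "multiples of 10": {n for n in range(1, limit + 1) if n % 10 == 0},
    }

def posterior(data, hypotheses):
    """P(h | D) proportional to P(D | h) * P(h), with a uniform prior and
    the 'size principle' likelihood: each example is assumed drawn
    uniformly from the concept, so P(D | h) = (1 / |h|) ** len(D) when
    every example lies in h, and 0 otherwise."""
    prior = 1.0 / len(hypotheses)
    scores = {name: prior * (1.0 / len(ext)) ** len(data)
                    if all(d in ext for d in data) else 0.0
              for name, ext in hypotheses.items()}
    z = sum(scores.values())
    return {name: s / z for name, s in scores.items()} if z else scores

if __name__ == "__main__":
    hypotheses = make_hypotheses()
    for data in ([16], [16, 8, 2, 64]):
        post = posterior(data, hypotheses)
        print(data, {h: round(p, 3) for h, p in post.items()})

With a single example, belief stays spread over the concepts consistent with it; four examples that all happen to be powers of two concentrate the posterior on that concept, because the (1/|h|)**n likelihood rewards the smallest hypothesis that still covers the data. The hierarchical models the review describes stack this same computation: the priors over hypotheses are themselves learned as posteriors at a higher level of abstraction.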
{"_id":"shfdPC6EhkSRmvu7E","bibbaseid":"tenenbaum-kemp-griffiths-goodman-howtogrowamindstatisticsstructureandabstraction-2011","authorIDs":[],"author_short":["Tenenbaum, J. B.","Kemp, C.","Griffiths, T. L.","Goodman, N. D."],"bibdata":{"bibtype":"article","type":"article","author":[{"propositions":[],"lastnames":["Tenenbaum"],"firstnames":["Joshua","B."],"suffixes":[]},{"propositions":[],"lastnames":["Kemp"],"firstnames":["Charles"],"suffixes":[]},{"propositions":[],"lastnames":["Griffiths"],"firstnames":["Thomas","L."],"suffixes":[]},{"propositions":[],"lastnames":["Goodman"],"firstnames":["Noah","D."],"suffixes":[]}],"journal":"Science","title":"How to grow a mind: statistics, structure, and abstraction.","year":"2011","number":"6022","pages":"1279–1285","volume":"331","abstract":"In coming to understand the world-in learning concepts, acquiring language, and grasping causal relations-our minds make inferences that appear to go far beyond the data available. How do we do it? This review describes recent approaches to reverse-engineering human learning and cognitive development and, in parallel, engineering more humanlike machine learning systems. Computational models that perform probabilistic inference over hierarchies of flexibly structured representations can address some of the deepest questions about the nature and origins of human thought: How does abstract knowledge guide learning and reasoning from sparse data? What forms does our knowledge take, across different domains and tasks? And how is that abstract knowledge itself acquired?","doi":"10.1126/science.1192788","institution":"Department of Brain and Cognitive Sciences, Massachusetts Institute of Technology, Cambridge, MA 02139, USA. jbt@mit.edu","keywords":"Artificial Intelligence; Bayes Theorem; Cognition; Concept Formation; Humans; Knowledge; Learning; Models, Statistical; Theory of Mind; Thinking","language":"eng","medline-pst":"ppublish","pmid":"21393536","timestamp":"2013.03.21","bibtex":"@Article{Tenenbaum2011,\n author = {Tenenbaum, Joshua B. and Kemp, Charles and Griffiths, Thomas L. and Goodman, Noah D.},\n journal = {Science},\n title = {How to grow a mind: statistics, structure, and abstraction.},\n year = {2011},\n number = {6022},\n pages = {1279--1285},\n volume = {331},\n abstract = {In coming to understand the world-in learning concepts, acquiring\n\tlanguage, and grasping causal relations-our minds make inferences\n\tthat appear to go far beyond the data available. How do we do it?\n\tThis review describes recent approaches to reverse-engineering human\n\tlearning and cognitive development and, in parallel, engineering\n\tmore humanlike machine learning systems. Computational models that\n\tperform probabilistic inference over hierarchies of flexibly structured\n\trepresentations can address some of the deepest questions about the\n\tnature and origins of human thought: How does abstract knowledge\n\tguide learning and reasoning from sparse data? What forms does our\n\tknowledge take, across different domains and tasks? And how is that\n\tabstract knowledge itself acquired?},\n doi = {10.1126/science.1192788},\n institution = {Department of Brain and Cognitive Sciences, Massachusetts Institute of Technology, Cambridge, MA 02139, USA. 
jbt@mit.edu},\n keywords = {Artificial Intelligence; Bayes Theorem; Cognition; Concept Formation; Humans; Knowledge; Learning; Models, Statistical; Theory of Mind; Thinking},\n language = {eng},\n medline-pst = {ppublish},\n pmid = {21393536},\n timestamp = {2013.03.21},\n}\n\n","author_short":["Tenenbaum, J. B.","Kemp, C.","Griffiths, T. L.","Goodman, N. D."],"key":"Tenenbaum2011","id":"Tenenbaum2011","bibbaseid":"tenenbaum-kemp-griffiths-goodman-howtogrowamindstatisticsstructureandabstraction-2011","role":"author","urls":{},"keyword":["Artificial Intelligence; Bayes Theorem; Cognition; Concept Formation; Humans; Knowledge; Learning; Models","Statistical; Theory of Mind; Thinking"],"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"article","biburl":"https://endress.org/publications/ansgar.bib","creationDate":"2020-01-27T02:13:34.391Z","downloads":0,"keywords":["artificial intelligence; bayes theorem; cognition; concept formation; humans; knowledge; learning; models","statistical; theory of mind; thinking"],"search_terms":["grow","mind","statistics","structure","abstraction","tenenbaum","kemp","griffiths","goodman"],"title":"How to grow a mind: statistics, structure, and abstraction.","year":2011,"dataSources":["SzgNB6yMASNi6tysA","xPGxHAeh3vZpx4yyE","rQFxZQs78YQJ9m34s","XFrKPG99s5t3W7xuW","TXa55dQbNoWnaGmMq"]}