Learnability Can Be Undecidable. Ben-David, S., Hrubeš, P., Moran, S., Shpilka, A., & Yehudayoff, A. *Nature Machine Intelligence*, 1(1):44–48, January, 2019. doi abstract bibtex The mathematical foundations of machine learning play a key role in the development of the field. They improve our understanding and provide tools for designing new learning paradigms. The advantages of mathematics, however, sometimes come with a cost. Gödel and Cohen showed, in a nutshell, that not everything is provable. Here we show that machine learning shares this fate. We describe simple scenarios where learnability cannot be proved nor refuted using the standard axioms of mathematics. Our proof is based on the fact the continuum hypothesis cannot be proved nor refuted. We show that, in some cases, a solution to the 'estimating the maximum' problem is equivalent to the continuum hypothesis. The main idea is to prove an equivalence between learnability and compression.

@article{ben-davidLearnabilityCanBe2019,
  author        = {{Ben-David}, Shai and Hrube{\v s}, Pavel and Moran, Shay and Shpilka, Amir and Yehudayoff, Amir},
  title         = {Learnability Can Be Undecidable},
  journal       = {Nature Machine Intelligence},
  year          = {2019},
  month         = jan,
  volume        = {1},
  number        = {1},
  pages         = {44--48},
  issn          = {2522-5839},
  doi           = {10.1038/s42256-018-0002-3},
  abstract      = {The mathematical foundations of machine learning play a key role in the development of the field. They improve our understanding and provide tools for designing new learning paradigms. The advantages of mathematics, however, sometimes come with a cost. G\"odel and Cohen showed, in a nutshell, that not everything is provable. Here we show that machine learning shares this fate. We describe simple scenarios where learnability cannot be proved nor refuted using the standard axioms of mathematics. Our proof is based on the fact the continuum hypothesis cannot be proved nor refuted. We show that, in some cases, a solution to the 'estimating the maximum' problem is equivalent to the continuum hypothesis. The main idea is to prove an equivalence between learnability and compression.},
  keywords      = {*imported-from-citeulike-INRMM,~INRMM-MiD:c-14677845,artificial-intelligence,epistemology,limiting-factor,machine-learning,mathematics,modelling-uncertainty,software-uncertainty,unexpected-effect},
  internal-note = {Collection ID INRMM-MiD:c-14677845 moved here from the lccn field; it is a CiteULike/INRMM identifier, not a Library of Congress Control Number},
}

Downloads: 0

{"_id":"d8irRpiQTQC7678ka","bibbaseid":"bendavid-hrubes-moran-shpilka-yehudayoff-learnabilitycanbeundecidable-2019","authorIDs":[],"author_short":["Ben-David, S.","Hrube ̌s, P.","Moran, S.","Shpilka, A.","Yehudayoff, A."],"bibdata":{"bibtype":"article","type":"article","title":"Learnability Can Be Undecidable","author":[{"propositions":[],"lastnames":["Ben-David"],"firstnames":["Shai"],"suffixes":[]},{"propositions":[],"lastnames":["Hrube ̌s"],"firstnames":["Pavel"],"suffixes":[]},{"propositions":[],"lastnames":["Moran"],"firstnames":["Shay"],"suffixes":[]},{"propositions":[],"lastnames":["Shpilka"],"firstnames":["Amir"],"suffixes":[]},{"propositions":[],"lastnames":["Yehudayoff"],"firstnames":["Amir"],"suffixes":[]}],"year":"2019","month":"January","volume":"1","pages":"44–48","issn":"2522-5839","doi":"10.1038/s42256-018-0002-3","abstract":"The mathematical foundations of machine learning play a key role in the development of the field. They improve our understanding and provide tools for designing new learning paradigms. The advantages of mathematics, however, sometimes come with a cost. Gödel and Cohen showed, in a nutshell, that not everything is provable. Here we show that machine learning shares this fate. We describe simple scenarios where learnability cannot be proved nor refuted using the standard axioms of mathematics. Our proof is based on the fact the continuum hypothesis cannot be proved nor refuted. We show that, in some cases, a solution to the 'estimating the maximum' problem is equivalent to the continuum hypothesis. 
The main idea is to prove an equivalence between learnability and compression.","journal":"Nature Machine Intelligence","keywords":"*imported-from-citeulike-INRMM,~INRMM-MiD:c-14677845,artificial-intelligence,epistemology,limiting-factor,machine-learning,mathematics,modelling-uncertainty,software-uncertainty,unexpected-effect","lccn":"INRMM-MiD:c-14677845","number":"1","bibtex":"@article{ben-davidLearnabilityCanBe2019,\n title = {Learnability Can Be Undecidable},\n author = {{Ben-David}, Shai and Hrube{\\v s}, Pavel and Moran, Shay and Shpilka, Amir and Yehudayoff, Amir},\n year = {2019},\n month = jan,\n volume = {1},\n pages = {44--48},\n issn = {2522-5839},\n doi = {10.1038/s42256-018-0002-3},\n abstract = {The mathematical foundations of machine learning play a key role in the development of the field. They improve our understanding and provide tools for designing new learning paradigms. The advantages of mathematics, however, sometimes come with a cost. G\\\"odel and Cohen showed, in a nutshell, that not everything is provable. Here we show that machine learning shares this fate. We describe simple scenarios where learnability cannot be proved nor refuted using the standard axioms of mathematics. Our proof is based on the fact the continuum hypothesis cannot be proved nor refuted. We show that, in some cases, a solution to the 'estimating the maximum' problem is equivalent to the continuum hypothesis. 
The main idea is to prove an equivalence between learnability and compression.},\n journal = {Nature Machine Intelligence},\n keywords = {*imported-from-citeulike-INRMM,~INRMM-MiD:c-14677845,artificial-intelligence,epistemology,limiting-factor,machine-learning,mathematics,modelling-uncertainty,software-uncertainty,unexpected-effect},\n lccn = {INRMM-MiD:c-14677845},\n number = {1}\n}\n\n","author_short":["Ben-David, S.","Hrube ̌s, P.","Moran, S.","Shpilka, A.","Yehudayoff, A."],"key":"ben-davidLearnabilityCanBe2019","id":"ben-davidLearnabilityCanBe2019","bibbaseid":"bendavid-hrubes-moran-shpilka-yehudayoff-learnabilitycanbeundecidable-2019","role":"author","urls":{},"keyword":["*imported-from-citeulike-INRMM","~INRMM-MiD:c-14677845","artificial-intelligence","epistemology","limiting-factor","machine-learning","mathematics","modelling-uncertainty","software-uncertainty","unexpected-effect"],"downloads":0},"bibtype":"article","biburl":"https://sharefast.me/php/download.php?id=zOUKvA&token=29","creationDate":"2020-07-03T22:46:15.303Z","downloads":0,"keywords":["*imported-from-citeulike-inrmm","~inrmm-mid:c-14677845","artificial-intelligence","epistemology","limiting-factor","machine-learning","mathematics","modelling-uncertainty","software-uncertainty","unexpected-effect"],"search_terms":["learnability","undecidable","ben-david","hrube ̌s","moran","shpilka","yehudayoff"],"title":"Learnability Can Be Undecidable","year":2019,"dataSources":["5S2zj2hKW8TWTkuMq"]}