The Rules of the Game Called Psychological Science. Bakker, M., van Dijk, A., & Wicherts, J. M. Perspectives on Psychological Science, 7(6):543–554, November, 2012. Publisher: SAGE Publications Inc. [Paper] [doi] [abstract] [bibtex] Abstract: If science were a game, a dominant rule would probably be to collect results that are statistically significant. Several reviews of the psychological literature have shown that around 96% of papers involving the use of null hypothesis significance testing report significant outcomes for their main results but that the typical studies are insufficiently powerful for such a track record. We explain this paradox by showing that the use of several small underpowered samples often represents a more efficient research strategy (in terms of finding p < .05) than does the use of one larger (more powerful) sample. Publication bias and the most efficient strategy lead to inflated effects and high rates of false positives, especially when researchers also resorted to questionable research practices, such as adding participants after intermediate testing. We provide simulations that highlight the severity of such biases in meta-analyses. We consider 13 meta-analyses covering 281 primary studies in various fields of psychology and find indications of biases and/or an excess of significant results in seven. These results highlight the need for sufficiently powerful replications and changes in journal policies.
@article{bakker_rules_2012,
	author   = {Bakker, Marjan and van Dijk, Annette and Wicherts, Jelte M.},
	title    = {The Rules of the Game Called Psychological Science},
	journal  = {Perspectives on Psychological Science},
	year     = {2012},
	month    = nov,
	volume   = {7},
	number   = {6},
	pages    = {543--554},
	issn     = {1745-6916},
	doi      = {10.1177/1745691612459060},
	language = {en},
	abstract = {If science were a game, a dominant rule would probably be to collect results that are statistically significant. Several reviews of the psychological literature have shown that around 96\% of papers involving the use of null hypothesis significance testing report significant outcomes for their main results but that the typical studies are insufficiently powerful for such a track record. We explain this paradox by showing that the use of several small underpowered samples often represents a more efficient research strategy (in terms of finding p {\textless} .05) than does the use of one larger (more powerful) sample. Publication bias and the most efficient strategy lead to inflated effects and high rates of false positives, especially when researchers also resorted to questionable research practices, such as adding participants after intermediate testing. We provide simulations that highlight the severity of such biases in meta-analyses. We consider 13 meta-analyses covering 281 primary studies in various fields of psychology and find indications of biases and/or an excess of significant results in seven. These results highlight the need for sufficiently powerful replications and changes in journal policies.},
	keywords = {false positives, power, publication bias, replication, sample size},
	note     = {Publisher: SAGE Publications Inc},
}
Downloads: 0
{"_id":"3P5GRJB2E2ud4WCMg","bibbaseid":"bakker-vandijk-wicherts-therulesofthegamecalledpsychologicalscience-2012","author_short":["Bakker, M.","van Dijk, A.","Wicherts, J. M."],"bibdata":{"bibtype":"article","type":"article","title":"The Rules of the Game Called Psychological Science","volume":"7","issn":"1745-6916","url":"https://doi.org/10.1177/1745691612459060","doi":"10.1177/1745691612459060","abstract":"If science were a game, a dominant rule would probably be to collect results that are statistically significant. Several reviews of the psychological literature have shown that around 96% of papers involving the use of null hypothesis significance testing report significant outcomes for their main results but that the typical studies are insufficiently powerful for such a track record. We explain this paradox by showing that the use of several small underpowered samples often represents a more efficient research strategy (in terms of finding p \\textless .05) than does the use of one larger (more powerful) sample. Publication bias and the most efficient strategy lead to inflated effects and high rates of false positives, especially when researchers also resorted to questionable research practices, such as adding participants after intermediate testing. We provide simulations that highlight the severity of such biases in meta-analyses. We consider 13 meta-analyses covering 281 primary studies in various fields of psychology and find indications of biases and/or an excess of significant results in seven. 
These results highlight the need for sufficiently powerful replications and changes in journal policies.","language":"en","number":"6","urldate":"2021-11-08","journal":"Perspectives on Psychological Science","author":[{"propositions":[],"lastnames":["Bakker"],"firstnames":["Marjan"],"suffixes":[]},{"propositions":["van"],"lastnames":["Dijk"],"firstnames":["Annette"],"suffixes":[]},{"propositions":[],"lastnames":["Wicherts"],"firstnames":["Jelte","M."],"suffixes":[]}],"month":"November","year":"2012","note":"Publisher: SAGE Publications Inc","keywords":"false positives, power, publication bias, replication, sample size","pages":"543–554","bibtex":"@article{bakker_rules_2012,\n\ttitle = {The {Rules} of the {Game} {Called} {Psychological} {Science}},\n\tvolume = {7},\n\tissn = {1745-6916},\n\turl = {https://doi.org/10.1177/1745691612459060},\n\tdoi = {10.1177/1745691612459060},\n\tabstract = {If science were a game, a dominant rule would probably be to collect results that are statistically significant. Several reviews of the psychological literature have shown that around 96\\% of papers involving the use of null hypothesis significance testing report significant outcomes for their main results but that the typical studies are insufficiently powerful for such a track record. We explain this paradox by showing that the use of several small underpowered samples often represents a more efficient research strategy (in terms of finding p {\\textless} .05) than does the use of one larger (more powerful) sample. Publication bias and the most efficient strategy lead to inflated effects and high rates of false positives, especially when researchers also resorted to questionable research practices, such as adding participants after intermediate testing. We provide simulations that highlight the severity of such biases in meta-analyses. 
We consider 13 meta-analyses covering 281 primary studies in various fields of psychology and find indications of biases and/or an excess of significant results in seven. These results highlight the need for sufficiently powerful replications and changes in journal policies.},\n\tlanguage = {en},\n\tnumber = {6},\n\turldate = {2021-11-08},\n\tjournal = {Perspectives on Psychological Science},\n\tauthor = {Bakker, Marjan and van Dijk, Annette and Wicherts, Jelte M.},\n\tmonth = nov,\n\tyear = {2012},\n\tnote = {Publisher: SAGE Publications Inc},\n\tkeywords = {false positives, power, publication bias, replication, sample size},\n\tpages = {543--554},\n}\n\n","author_short":["Bakker, M.","van Dijk, A.","Wicherts, J. M."],"key":"bakker_rules_2012","id":"bakker_rules_2012","bibbaseid":"bakker-vandijk-wicherts-therulesofthegamecalledpsychologicalscience-2012","role":"author","urls":{"Paper":"https://doi.org/10.1177/1745691612459060"},"keyword":["false positives","power","publication bias","replication","sample size"],"metadata":{"authorlinks":{}}},"bibtype":"article","biburl":"https://api.zotero.org/groups/4504479/items?key=BfP7bN7FF9dJwtyiLBORewdg&format=bibtex&limit=100","dataSources":["bb9wnMe4gzczbyaCs"],"keywords":["false positives","power","publication bias","replication","sample size"],"search_terms":["rules","game","called","psychological","science","bakker","van dijk","wicherts"],"title":"The Rules of the Game Called Psychological Science","year":2012}