<script src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F4504479%2Fitems%3Fkey%3DBfP7bN7FF9dJwtyiLBORewdg%26format%3Dbibtex%26limit%3D100&jsonp=1"></script>
<?php
$contents = file_get_contents("https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F4504479%2Fitems%3Fkey%3DBfP7bN7FF9dJwtyiLBORewdg%26format%3Dbibtex%26limit%3D100");
print_r($contents);
?>
<iframe src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F4504479%2Fitems%3Fkey%3DBfP7bN7FF9dJwtyiLBORewdg%26format%3Dbibtex%26limit%3D100"></iframe>
For more details see the documentation.
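Rather than embedding BibBase's rendered page, the underlying BibTeX can also be fetched directly from the Zotero group API. The following is a minimal PHP sketch, not an official BibBase or Zotero example; it assumes nothing beyond the group ID and API key already visible in the URLs above:

<?php
// Minimal sketch: fetch the raw BibTeX feed that BibBase renders above,
// using the group ID and API key from the embed URLs.
$url = "https://api.zotero.org/groups/4504479/items"
     . "?key=BfP7bN7FF9dJwtyiLBORewdg&format=bibtex&limit=100";
$bibtex = file_get_contents($url);
if ($bibtex === false) {
    exit("Could not reach the Zotero API.\n");
}
// Count entries by their leading @type{ markers (@article, @techreport, ...).
preg_match_all('/^@\w+\{/m', $bibtex, $matches);
echo "Fetched " . count($matches[0]) . " BibTeX entries.\n";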
@techreport{lin_fooled_2021, title = {Fooled by beautiful data: {Visualization} aesthetics bias trust in science, news, and social media}, shorttitle = {Fooled by beautiful data}, url = {https://psyarxiv.com/dnr9s/}, abstract = {Scientists, policymakers, and the public increasingly rely on data visualizations – such as COVID tracking charts, weather forecast maps, and political polling graphs – to inform important decisions. The aesthetic decisions of graph-makers may produce graphs of varying visual appeal, independent of data quality. Here we tested whether the beauty of a graph influences how much people trust it. Across three studies, we sampled graphs from social media, news reports, and scientific publications, and consistently found that graph beauty predicted trust. In a fourth study, we manipulated both the graph beauty and misleadingness. We found that beauty, but not actual misleadingness, causally affected trust. These findings reveal a source of bias in the interpretation of quantitative data and indicate the importance of promoting data literacy in education.}, language = {en-us}, urldate = {2022-01-24}, institution = {PsyArXiv}, author = {Lin, Chujun and Thornton, Mark Allen}, month = dec, year = {2021}, doi = {10.31234/osf.io/dnr9s}, note = {type: article}, keywords = {Aesthetics, Beauty-is-good Stereotype, Causal Effects, Data Visualizations, Psychology, Public Trust, Publication Bias, Social and Behavioral Sciences, other}, }
@inproceedings{banken_comparison_2021, address = {Cham}, series = {Lecture {Notes} in {Information} {Systems} and {Organisation}}, title = {A {Comparison} of {Crowd} {Types}: {Idea} {Selection} {Performance} of {Students} and {Amazon} {Mechanical} {Turks}}, isbn = {978-3-030-86800-0}, shorttitle = {A {Comparison} of {Crowd} {Types}}, doi = {10.1007/978-3-030-86800-0_30}, abstract = {Crowdsourcing is an effective means to generate a multitude of ideas in a very short amount of time. Therefore, companies and researchers increasingly tap into the power of the crowd for the evaluation of these ideas. However, not all types of crowds are the equally capable for complex decision-making tasks, which might result in poor selection performance. This research aims to evaluate differences in anonymous crowds and student crowds regarding their information processing, attention and selection performance. A web-experiment with 339 participants was conducted to reveal that 1) undergraduate Information Systems students perform better in idea selection than crowd workers recruited from Amazon Mechanical Turk, 2) attention checks increase selection performance and 3) while crowd workers indicate to process information more systematically, students acquire more information for evaluation than crowd workers.}, language = {en}, booktitle = {Innovation {Through} {Information} {Systems}}, publisher = {Springer International Publishing}, author = {Banken, Victoria}, editor = {Ahlemann, Frederik and Schütte, Reinhard and Stieglitz, Stefan}, year = {2021}, keywords = {Amazon Mechanical Turk, Attention, Crowd types, Crowdsourcing, Open Innovation, Student sample}, pages = {437--453}, }
@article{cheplygina_ten_2020, title = {Ten simple rules for getting started on {Twitter} as a scientist}, volume = {16}, issn = {1553-7358}, url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1007513}, doi = {10.1371/journal.pcbi.1007513}, language = {en}, number = {2}, urldate = {2022-02-08}, journal = {PLOS Computational Biology}, author = {Cheplygina, Veronika and Hermans, Felienne and Albers, Casper and Bielczyk, Natalia and Smeets, Ionica}, month = feb, year = {2020}, note = {Publisher: Public Library of Science}, keywords = {Careers, Careers in research, Jobs, Scientists, Social communication, Social media, Social networks, Twitter}, pages = {e1007513}, }
@article{crameri_misuse_2020, title = {The misuse of colour in science communication}, volume = {11}, copyright = {2020 The Author(s)}, issn = {2041-1723}, url = {https://www.nature.com/articles/s41467-020-19160-7}, doi = {10.1038/s41467-020-19160-7}, abstract = {The accurate representation of data is essential in science communication. However, colour maps that visually distort data through uneven colour gradients or are unreadable to those with colour-vision deficiency remain prevalent in science. These include, but are not limited to, rainbow-like and red–green colour maps. Here, we present a simple guide for the scientific use of colour. We show how scientifically derived colour maps report true data variations, reduce complexity, and are accessible for people with colour-vision deficiencies. We highlight ways for the scientific community to identify and prevent the misuse of colour in science, and call for a proactive step away from colour misuse among the community, publishers, and the press.}, language = {en}, number = {1}, urldate = {2022-01-24}, journal = {Nature Communications}, author = {Crameri, Fabio and Shephard, Grace E. and Heron, Philip J.}, month = oct, year = {2020}, note = {Bandiera\_abtest: a Cc\_license\_type: cc\_by Cg\_type: Nature Research Journals Number: 1 Primary\_atype: Reviews Publisher: Nature Publishing Group Subject\_term: Scientific community;Software Subject\_term\_id: scientific-community;software}, keywords = {Scientific community, Software}, pages = {5444}, }
@article{nussenbaum_moving_2020, title = {Moving {Developmental} {Research} {Online}: {Comparing} {In}-{Lab} and {Web}-{Based} {Studies} of {Model}-{Based} {Reinforcement} {Learning}}, volume = {6}, issn = {2474-7394}, shorttitle = {Moving {Developmental} {Research} {Online}}, url = {https://doi.org/10.1525/collabra.17213}, doi = {10.1525/collabra.17213}, abstract = {For years, adult psychological research has benefitted from web-based data collection. There is growing interest in harnessing this approach to facilitate data collection from children and adolescents to address foundational questions about cognitive development. To date, however, few studies have directly tested whether findings from in-lab developmental psychology tasks can be replicated online, particularly in the domain of value-based learning and decision-making. To address this question, we set up a pipeline for online data collection with children, adolescents, and adults, and conducted a replication of Decker et al. (2016). The original in-lab study employed a sequential decision-making paradigm to examine shifts in value-learning strategies from childhood to adulthood. Here, we used the same paradigm in a sample of 151 children (N = 50; ages 8 - 12 years), adolescents (N = 50; ages 13 - 17 years), and adults (N = 51; ages 18 - 25 years) and replicated the main finding that the use of a “model-based” learning strategy increases with age. In addition, we adapted a new index of abstract reasoning (MaRs-IB; Chierchia et al. 2019) for use online, and replicated a key result from Potter et al. (2017), which found that abstract reasoning ability mediated the relation between age and model-based learning. Our re-analyses of two previous in-lab datasets alongside our analysis of our online dataset revealed few qualitative differences across task administrations. These findings suggest that with appropriate precautions, researchers can effectively examine developmental differences in learning computations through unmoderated, online experiments.}, number = {1}, urldate = {2022-01-11}, journal = {Collabra: Psychology}, author = {Nussenbaum, Kate and Scheuplein, Maximilian and Phaneuf, Camille V. and Evans, Michael D. and Hartley, Catherine A.}, month = nov, year = {2020}, pages = {17213}, }
@article{sauter_building_2020, title = {Building, {Hosting} and {Recruiting}: {A} {Brief} {Introduction} to {Running} {Behavioral} {Experiments} {Online}}, volume = {10}, copyright = {http://creativecommons.org/licenses/by/3.0/}, shorttitle = {Building, {Hosting} and {Recruiting}}, url = {https://www.mdpi.com/2076-3425/10/4/251}, doi = {10.3390/brainsci10040251}, abstract = {Researchers have ample reasons to take their experimental studies out of the lab and into the online wilderness. For some, it is out of necessity, due to an unforeseen laboratory closure or difficulties in recruiting on-site participants. Others want to benefit from the large and diverse online population. However, the transition from in-lab to online data acquisition is not trivial and might seem overwhelming at first. To facilitate this transition, we present an overview of actively maintained solutions for the critical components of successful online data acquisition: creating, hosting and recruiting. Our aim is to provide a brief introductory resource and discuss important considerations for researchers who are taking their first steps towards online experimentation.}, language = {en}, number = {4}, urldate = {2021-12-13}, journal = {Brain Sciences}, author = {Sauter, Marian and Draschkow, Dejan and Mack, Wolfgang}, month = apr, year = {2020}, note = {Number: 4 Publisher: Multidisciplinary Digital Publishing Institute}, keywords = {behavioral sciences, online experiments, online methods, remote testing}, pages = {251}, }
@article{botvinik-nezer_variability_2020, title = {Variability in the analysis of a single neuroimaging dataset by many teams}, volume = {582}, copyright = {2020 The Author(s), under exclusive licence to Springer Nature Limited}, issn = {1476-4687}, url = {https://www.nature.com/articles/s41586-020-2314-9}, doi = {10.1038/s41586-020-2314-9}, abstract = {Data analysis workflows in many scientific domains have become increasingly complex and flexible. Here we assess the effect of this flexibility on the results of functional magnetic resonance imaging by asking 70 independent teams to analyse the same dataset, testing the same 9 ex-ante hypotheses1. The flexibility of analytical approaches is exemplified by the fact that no two teams chose identical workflows to analyse the data. This flexibility resulted in sizeable variation in the results of hypothesis tests, even for teams whose statistical maps were highly correlated at intermediate stages of the analysis pipeline. Variation in reported results was related to several aspects of analysis methodology. Notably, a meta-analytical approach that aggregated information across teams yielded a significant consensus in activated regions. Furthermore, prediction markets of researchers in the field revealed an overestimation of the likelihood of significant findings, even by researchers with direct knowledge of the dataset2–5. Our findings show that analytical flexibility can have substantial effects on scientific conclusions, and identify factors that may be related to variability in the analysis of functional magnetic resonance imaging. The results emphasize the importance of validating and sharing complex analysis workflows, and demonstrate the need for performing and reporting multiple analyses of the same data. Potential approaches that could be used to mitigate issues related to analytical variability are discussed.}, language = {en}, number = {7810}, urldate = {2021-11-08}, journal = {Nature}, author = {Botvinik-Nezer, Rotem and Holzmeister, Felix and Camerer, Colin F. and Dreber, Anna and Huber, Juergen and Johannesson, Magnus and Kirchler, Michael and Iwanir, Roni and Mumford, Jeanette A. and Adcock, R. Alison and Avesani, Paolo and Baczkowski, Blazej M. and Bajracharya, Aahana and Bakst, Leah and Ball, Sheryl and Barilari, Marco and Bault, Nadège and Beaton, Derek and Beitner, Julia and Benoit, Roland G. and Berkers, Ruud M. W. J. and Bhanji, Jamil P. and Biswal, Bharat B. and Bobadilla-Suarez, Sebastian and Bortolini, Tiago and Bottenhorn, Katherine L. and Bowring, Alexander and Braem, Senne and Brooks, Hayley R. and Brudner, Emily G. and Calderon, Cristian B. and Camilleri, Julia A. and Castrellon, Jaime J. and Cecchetti, Luca and Cieslik, Edna C. and Cole, Zachary J. and Collignon, Olivier and Cox, Robert W. and Cunningham, William A. and Czoschke, Stefan and Dadi, Kamalaker and Davis, Charles P. and Luca, Alberto De and Delgado, Mauricio R. and Demetriou, Lysia and Dennison, Jeffrey B. and Di, Xin and Dickie, Erin W. and Dobryakova, Ekaterina and Donnat, Claire L. and Dukart, Juergen and Duncan, Niall W. and Durnez, Joke and Eed, Amr and Eickhoff, Simon B. and Erhart, Andrew and Fontanesi, Laura and Fricke, G. Matthew and Fu, Shiguang and Galván, Adriana and Gau, Remi and Genon, Sarah and Glatard, Tristan and Glerean, Enrico and Goeman, Jelle J. and Golowin, Sergej A. E. and González-García, Carlos and Gorgolewski, Krzysztof J. and Grady, Cheryl L. and Green, Mikella A. and Guassi Moreira, João F. 
and Guest, Olivia and Hakimi, Shabnam and Hamilton, J. Paul and Hancock, Roeland and Handjaras, Giacomo and Harry, Bronson B. and Hawco, Colin and Herholz, Peer and Herman, Gabrielle and Heunis, Stephan and Hoffstaedter, Felix and Hogeveen, Jeremy and Holmes, Susan and Hu, Chuan-Peng and Huettel, Scott A. and Hughes, Matthew E. and Iacovella, Vittorio and Iordan, Alexandru D. and Isager, Peder M. and Isik, Ayse I. and Jahn, Andrew and Johnson, Matthew R. and Johnstone, Tom and Joseph, Michael J. E. and Juliano, Anthony C. and Kable, Joseph W. and Kassinopoulos, Michalis and Koba, Cemal and Kong, Xiang-Zhen and Koscik, Timothy R. and Kucukboyaci, Nuri Erkut and Kuhl, Brice A. and Kupek, Sebastian and Laird, Angela R. and Lamm, Claus and Langner, Robert and Lauharatanahirun, Nina and Lee, Hongmi and Lee, Sangil and Leemans, Alexander and Leo, Andrea and Lesage, Elise and Li, Flora and Li, Monica Y. C. and Lim, Phui Cheng and Lintz, Evan N. and Liphardt, Schuyler W. and Losecaat Vermeer, Annabel B. and Love, Bradley C. and Mack, Michael L. and Malpica, Norberto and Marins, Theo and Maumet, Camille and McDonald, Kelsey and McGuire, Joseph T. and Melero, Helena and Méndez Leal, Adriana S. and Meyer, Benjamin and Meyer, Kristin N. and Mihai, Glad and Mitsis, Georgios D. and Moll, Jorge and Nielson, Dylan M. and Nilsonne, Gustav and Notter, Michael P. and Olivetti, Emanuele and Onicas, Adrian I. and Papale, Paolo and Patil, Kaustubh R. and Peelle, Jonathan E. and Pérez, Alexandre and Pischedda, Doris and Poline, Jean-Baptiste and Prystauka, Yanina and Ray, Shruti and Reuter-Lorenz, Patricia A. and Reynolds, Richard C. and Ricciardi, Emiliano and Rieck, Jenny R. and Rodriguez-Thompson, Anais M. and Romyn, Anthony and Salo, Taylor and Samanez-Larkin, Gregory R. and Sanz-Morales, Emilio and Schlichting, Margaret L. and Schultz, Douglas H. and Shen, Qiang and Sheridan, Margaret A. and Silvers, Jennifer A. and Skagerlund, Kenny and Smith, Alec and Smith, David V. and Sokol-Hessner, Peter and Steinkamp, Simon R. and Tashjian, Sarah M. and Thirion, Bertrand and Thorp, John N. and Tinghög, Gustav and Tisdall, Loreen and Tompson, Steven H. and Toro-Serey, Claudio and Torre Tresols, Juan Jesus and Tozzi, Leonardo and Truong, Vuong and Turella, Luca and van ‘t Veer, Anna E. and Verguts, Tom and Vettel, Jean M. and Vijayarajah, Sagana and Vo, Khoi and Wall, Matthew B. and Weeda, Wouter D. and Weis, Susanne and White, David J. and Wisniewski, David and Xifra-Porxas, Alba and Yearling, Emily A. and Yoon, Sangsuk and Yuan, Rui and Yuen, Kenneth S. L. and Zhang, Lei and Zhang, Xu and Zosky, Joshua E. and Nichols, Thomas E. and Poldrack, Russell A. and Schonberg, Tom}, month = jun, year = {2020}, note = {Bandiera\_abtest: a Cg\_type: Nature Research Journals Number: 7810 Primary\_atype: Research Publisher: Nature Publishing Group Subject\_term: Decision;Decision making;Human behaviour;Scientific community Subject\_term\_id: decision;decision-making;human-behaviour;scientific-community}, keywords = {Decision, Decision making, Human behaviour, Scientific community}, pages = {84--88}, }
@article{carey_ten_2020, title = {Ten simple rules for reading a scientific paper}, volume = {16}, issn = {1553-7358}, url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1008032}, doi = {10.1371/journal.pcbi.1008032}, language = {en}, number = {7}, urldate = {2021-11-08}, journal = {PLOS Computational Biology}, author = {Carey, Maureen A. and Steiner, Kevin L. and Petri, Jr., William A.}, month = jul, year = {2020}, note = {Publisher: Public Library of Science}, keywords = {Careers, Graduates, Habits, Human learning, Research reporting guidelines, Scientists, Textbooks, Undergraduates}, pages = {e1008032}, }
@article{nunez_optimizing_2018, title = {Optimizing colormaps with consideration for color vision deficiency to enable accurate interpretation of scientific data}, volume = {13}, issn = {1932-6203}, url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0199239}, doi = {10.1371/journal.pone.0199239}, abstract = {Color vision deficiency (CVD) affects more than 4\% of the population and leads to a different visual perception of colors. Though this has been known for decades, colormaps with many colors across the visual spectra are often used to represent data, leading to the potential for misinterpretation or difficulty with interpretation by someone with this deficiency. Until the creation of the module presented here, there were no colormaps mathematically optimized for CVD using modern color appearance models. While there have been some attempts to make aesthetically pleasing or subjectively tolerable colormaps for those with CVD, our goal was to make optimized colormaps for the most accurate perception of scientific data by as many viewers as possible. We developed a Python module, cmaputil, to create CVD-optimized colormaps, which imports colormaps and modifies them to be perceptually uniform in CVD-safe colorspace while linearizing and maximizing the brightness range. The module is made available to the science community to enable others to easily create their own CVD-optimized colormaps. Here, we present an example CVD-optimized colormap created with this module that is optimized for viewing by those without a CVD as well as those with red-green colorblindness. This colormap, cividis, enables nearly-identical visual-data interpretation to both groups, is perceptually uniform in hue and brightness, and increases in brightness linearly.}, language = {en}, number = {7}, urldate = {2022-01-24}, journal = {PLOS ONE}, author = {Nuñez, Jamie R. and Anderton, Christopher R. and Renslow, Ryan S.}, month = aug, year = {2018}, note = {Publisher: Public Library of Science}, keywords = {Color vision, Computer software, Fluid flow, Linear regression analysis, Secondary ion mass spectrometry, Sensory perception, Sine waves, Vision}, pages = {e0199239}, }
@article{semmelmann_online_2017, title = {Online psychophysics: reaction time effects in cognitive experiments}, volume = {49}, issn = {1554-3528}, shorttitle = {Online psychophysics}, url = {https://doi.org/10.3758/s13428-016-0783-4}, doi = {10.3758/s13428-016-0783-4}, abstract = {Using the Internet to acquire behavioral data is currently on the rise. However, very basic questions regarding the feasibility of online psychophysics are still open. Here, we aimed to replicate five well-known paradigms in experimental psychology (Stroop, Flanker, visual search, masked priming, attentional blink) in three settings (classical “lab”, “web-in-lab”, “web”) to account for possible changes in technology and environment. Lab and web-in-lab data were both acquired in an in-lab setting with lab using “Gold Standard” methods, while web-in-lab used web technology. This allowed for a direct comparison of potential differences in acquisition software. To account for additional environmental differences, the web technology experiments were published online to participate from home (setting web), thereby keeping the software and experimental design identical and only changing the environmental setting. Our main results are: First, we found an expected fixed additive timing offset when using web technology (M = 37 ms, SD = 8.14) and recording online (M = 87 ms, SD = 16.04) in comparison to lab data. Second, all task-specific effects were reproduced except for the priming paradigm, which couldn’t be replicated in any setting. Third, there were no differences in error rates, which are independent of the timing offset. This finding further supports the assumption of data equality over all settings. Fourth, we found that browser type might be influencing absolute reaction times. Together, these results contribute to the slowly but steadily growing literature that online psychophysics is a suitable complement – or even substitute – to lab data acquisition.}, language = {en}, number = {4}, urldate = {2022-01-11}, journal = {Behavior Research Methods}, author = {Semmelmann, Kilian and Weigelt, Sarah}, month = aug, year = {2017}, pages = {1241--1260}, }
@article{noble_ten_2017, title = {Ten simple rules for writing a response to reviewers}, volume = {13}, issn = {1553-7358}, url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005730}, doi = {10.1371/journal.pcbi.1005730}, language = {en}, number = {10}, urldate = {2021-11-22}, journal = {PLOS Computational Biology}, author = {Noble, William Stafford}, month = oct, year = {2017}, note = {Publisher: Public Library of Science}, keywords = {Conflicts of interest, Emotions, Peer review}, pages = {e1005730}, }
@article{crede_revisiting_2017, title = {Revisiting the {Power} {Pose} {Effect}: {How} {Robust} {Are} the {Results} {Reported} by {Carney}, {Cuddy}, and {Yap} (2010) to {Data} {Analytic} {Decisions}?}, volume = {8}, issn = {1948-5506}, shorttitle = {Revisiting the {Power} {Pose} {Effect}}, url = {https://doi.org/10.1177/1948550617714584}, doi = {10.1177/1948550617714584}, abstract = {The literature on the impact of expansive poses on biological and psychological variables is characterized by discrepant findings. These discrepant findings may, in part, be a function of differences in how data were analyzed. In this article, we use multiverse analysis to examine whether the findings reported in the original paper by Carney, Cuddy, and Yap are robust to plausible alternative data analytic specifications: outlier identification strategy, the specification of the dependent variable, and the use of control variables. Our findings indicate that the inferences regarding the presence and size of an effect on testosterone and cortisol are highly sensitive to data analytic specifications. We encourage researchers to routinely explore the influence of data analytic choices on statistical inferences and also encourage editors and reviewers to require explicit examinations of the influence of alternative data analytic specifications on the inferences that are drawn from data.}, language = {en}, number = {5}, urldate = {2021-11-08}, journal = {Social Psychological and Personality Science}, author = {Credé, Marcus and Phillips, Leigh A.}, month = jul, year = {2017}, note = {Publisher: SAGE Publications Inc}, keywords = {expansive pose, multiverse analysis, p-hacking, power pose, researcher degrees of freedom}, pages = {493--499}, }
@article{abele-brehm_wer_2016, title = {Wer soll die {Professur} bekommen?}, volume = {67}, issn = {0033-3042}, url = {https://econtent.hogrefe.com/doi/full/10.1026/0033-3042/a000335}, doi = {10.1026/0033-3042/a000335}, abstract = {Zusammenfassung. Die Entwicklung einer Wissenschaft ist abhängig von den Personen, die sie tragen. Der Auswahl geeigneter Personen in Berufungsverfahren auf Professuren kommt deshalb eine besondere Bedeutung zu. Die vorliegende Studie beschäftigt sich erstmals damit, wie Kolleginnen und Kollegen der Psychologie Berufungsverfahren beurteilen; wie wichtig sie verschiedene Indikatoren für die Eignung auf eine Professur einschätzen; wie hoch die Diskrepanz zwischen gewünschter und tatsächlicher Relevanz dieser Indikatoren ist; sowie wie sie zu verschiedenen Ausgestaltungsmöglichkeiten von Berufungsverfahren stehen. Es wurden 3.784 Mitglieder der DGPs angeschrieben, um an einer online Befragung teilzunehmen. N = 1.453 Personen beantworteten zumindest einen Teil der Fragen. Die Ergebnisse zeigen, dass die Diskrepanzen zwischen Ist und Soll bei überfachlichen Kompetenzen (Kommunikation, Kooperation, strategisches Denken) besonders groß sind und dass die Befragten den Stellenwert quantitativer Forschungsleistungsindikatoren als zu hoch ansehen. Die Befragten befürworten den Einsatz strukturierter Interviews zur Erfassung überfachlicher Kompetenzen, eine multi-methodale Messung der Forschungs- und Lehrleistung durch qualitative und quantitative Indikatoren sowie stärker strukturierte Probelehrvorträge. Mögliche fachpolitische Konsequenzen dieser Befunde werden diskutiert.}, number = {4}, urldate = {2021-11-08}, journal = {Psychologische Rundschau}, author = {Abele-Brehm, Andrea E. and Bühner, Markus}, month = oct, year = {2016}, note = {Publisher: Hogrefe Verlag}, keywords = {Berufungsverfahren, Eignungskriterien, hiring procedures for professors, indicators of suitability, quantitative and qualitative research performance, quantitative und qualitative Indikatoren der Forschungsleistung, social and management skills, soziale und Management Fertigkeiten}, pages = {250--261}, }
@article{kovesi_good_2015, title = {Good {Colour} {Maps}: {How} to {Design} {Them}}, shorttitle = {Good {Colour} {Maps}}, url = {http://arxiv.org/abs/1509.03700}, abstract = {Many colour maps provided by vendors have highly uneven perceptual contrast over their range. It is not uncommon for colour maps to have perceptual flat spots that can hide a feature as large as one tenth of the total data range. Colour maps may also have perceptual discontinuities that induce the appearance of false features. Previous work in the design of perceptually uniform colour maps has mostly failed to recognise that CIELAB space is only designed to be perceptually uniform at very low spatial frequencies. The most important factor in designing a colour map is to ensure that the magnitude of the incremental change in perceptual lightness of the colours is uniform. The specific requirements for linear, diverging, rainbow and cyclic colour maps are developed in detail. To support this work two test images for evaluating colour maps are presented. The use of colour maps in combination with relief shading is considered and the conditions under which colour can enhance or disrupt relief shading are identified. Finally, a set of new basis colours for the construction of ternary images are presented. Unlike the RGB primaries these basis colours produce images whereby the salience of structures are consistent irrespective of the assignment of basis colours to data channels.}, urldate = {2022-01-24}, journal = {arXiv:1509.03700 [cs]}, author = {Kovesi, Peter}, month = sep, year = {2015}, note = {arXiv: 1509.03700}, keywords = {Computer Science - Graphics, I.3.3}, }
@inproceedings{reinecke_labinthewild_2015, address = {New York, NY, USA}, series = {{CSCW} '15}, title = {{LabintheWild}: {Conducting} {Large}-{Scale} {Online} {Experiments} {With} {Uncompensated} {Samples}}, isbn = {978-1-4503-2922-4}, shorttitle = {{LabintheWild}}, url = {https://doi.org/10.1145/2675133.2675246}, doi = {10.1145/2675133.2675246}, abstract = {Web-based experimentation with uncompensated and unsupervised samples has the potential to support the replication, verification, extension and generation of new results with larger and more diverse sample populations than previously seen. We introduce the experimental online platform LabintheWild, which provides participants with personalized feedback in exchange for participation in behavioral studies. In comparison to conventional in-lab studies, LabintheWild enables the recruitment of participants at larger scale and from more diverse demographic and geographic backgrounds. We analyze Google Analytics data, participants' comments, and tweets to discuss how participants hear about the platform, and why they might choose to participate. Analyzing three example experiments, we additionally show that these experiments replicate previous in-lab study results with comparable data quality.}, urldate = {2022-01-11}, booktitle = {Proceedings of the 18th {ACM} {Conference} on {Computer} {Supported} {Cooperative} {Work} \& {Social} {Computing}}, publisher = {Association for Computing Machinery}, author = {Reinecke, Katharina and Gajos, Krzysztof Z.}, month = feb, year = {2015}, keywords = {crowdsourcing, online experimentation, replication, social comparison, uncompensated samples, weird}, pages = {1364--1378}, }
@article{michener_ten_2015, title = {Ten {Simple} {Rules} for {Creating} a {Good} {Data} {Management} {Plan}}, volume = {11}, issn = {1553-7358}, url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1004525}, doi = {10.1371/journal.pcbi.1004525}, language = {en}, number = {10}, urldate = {2021-11-16}, journal = {PLOS Computational Biology}, author = {Michener, William K.}, month = oct, year = {2015}, note = {Publisher: Public Library of Science}, keywords = {Archives, Biological data management, Computer software, Data management, Data visualization, Life cycles, Metadata, Science policy}, pages = {e1004525}, }
@article{kaplan_likelihood_2015, title = {Likelihood of {Null} {Effects} of {Large} {NHLBI} {Clinical} {Trials} {Has} {Increased} over {Time}}, volume = {10}, issn = {1932-6203}, url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0132382}, doi = {10.1371/journal.pone.0132382}, abstract = {Background We explore whether the number of null results in large National Heart Lung, and Blood Institute (NHLBI) funded trials has increased over time. Methods We identified all large NHLBI supported RCTs between 1970 and 2012 evaluating drugs or dietary supplements for the treatment or prevention of cardiovascular disease. Trials were included if direct costs {\textgreater}\$500,000/year, participants were adult humans, and the primary outcome was cardiovascular risk, disease or death. The 55 trials meeting these criteria were coded for whether they were published prior to or after the year 2000, whether they registered in clinicaltrials.gov prior to publication, used active or placebo comparator, and whether or not the trial had industry co-sponsorship. We tabulated whether the study reported a positive, negative, or null result on the primary outcome variable and for total mortality. Results 17 of 30 studies (57\%) published prior to 2000 showed a significant benefit of intervention on the primary outcome in comparison to only 2 among the 25 (8\%) trials published after 2000 (χ2=12.2,df= 1, p=0.0005). There has been no change in the proportion of trials that compared treatment to placebo versus active comparator. Industry co-sponsorship was unrelated to the probability of reporting a significant benefit. Pre-registration in clinical trials.gov was strongly associated with the trend toward null findings. Conclusions The number NHLBI trials reporting positive results declined after the year 2000. Prospective declaration of outcomes in RCTs, and the adoption of transparent reporting standards, as required by clinicaltrials.gov, may have contributed to the trend toward null findings.}, language = {en}, number = {8}, urldate = {2021-11-08}, journal = {PLOS ONE}, author = {Kaplan, Robert M. and Irvin, Veronica L.}, month = aug, year = {2015}, note = {Publisher: Public Library of Science}, keywords = {Cardiovascular diseases, Cardiovascular therapy, Coronary heart disease, Drug therapy, Myocardial infarction, Randomized controlled trials, Sudden cardiac death, Women's health}, pages = {e0132382}, }
@article{open_science_collaboration_estimating_2015, title = {Estimating the reproducibility of psychological science}, volume = {349}, url = {https://www.science.org/doi/10.1126/science.aac4716}, doi = {10.1126/science.aac4716}, number = {6251}, urldate = {2021-11-08}, journal = {Science}, author = {{Open Science Collaboration}}, month = aug, year = {2015}, note = {Publisher: American Association for the Advancement of Science}, pages = {aac4716}, }
@article{vinkers_use_2015, title = {Use of positive and negative words in scientific {PubMed} abstracts between 1974 and 2014: retrospective analysis}, volume = {351}, copyright = {Published by the BMJ Publishing Group Limited. For permission to use (where not already granted under a licence) please go to http://group.bmj.com/group/rights-licensing/permissions. This is an Open Access article distributed in accordance with the Creative Commons Attribution Non Commercial (CC BY-NC 3.0) license, which permits others to distribute, remix, adapt, build upon this work non-commercially, and license their derivative works on different terms, provided the original work is properly cited and the use is non-commercial. See: http://creativecommons.org/licenses/by-nc/3.0/.}, issn = {1756-1833}, shorttitle = {Use of positive and negative words in scientific {PubMed} abstracts between 1974 and 2014}, url = {https://www.bmj.com/content/351/bmj.h6467}, doi = {10.1136/bmj.h6467}, abstract = {Objective To investigate whether language used in science abstracts can skew towards the use of strikingly positive and negative words over time. Design Retrospective analysis of all scientific abstracts in PubMed between 1974 and 2014. Methods The yearly frequencies of positive, negative, and neutral words (25 preselected words in each category), plus 100 randomly selected words were normalised for the total number of abstracts. Subanalyses included pattern quantification of individual words, specificity for selected high impact journals, and comparison between author affiliations within or outside countries with English as the official majority language. Frequency patterns were compared with 4\% of all books ever printed and digitised by use of Google Books Ngram Viewer. Main outcome measures Frequencies of positive and negative words in abstracts compared with frequencies of words with a neutral and random connotation, expressed as relative change since 1980. Results The absolute frequency of positive words increased from 2.0\% (1974-80) to 17.5\% (2014), a relative increase of 880\% over four decades. All 25 individual positive words contributed to the increase, particularly the words “robust,” “novel,” “innovative,” and “unprecedented,” which increased in relative frequency up to 15 000\%. Comparable but less pronounced results were obtained when restricting the analysis to selected journals with high impact factors. Authors affiliated to an institute in a non-English speaking country used significantly more positive words. Negative word frequencies increased from 1.3\% (1974-80) to 3.2\% (2014), a relative increase of 257\%. Over the same time period, no apparent increase was found in neutral or random word use, or in the frequency of positive word use in published books. Conclusions Our lexicographic analysis indicates that scientific abstracts are currently written with more positive and negative words, and provides an insight into the evolution of scientific writing. Apparently scientists look on the bright side of research results. But whether this perception fits reality should be questioned.}, language = {en}, urldate = {2021-11-08}, journal = {BMJ}, author = {Vinkers, Christiaan H. and Tijdink, Joeri K. and Otte, Willem M.}, month = dec, year = {2015}, pmid = {26668206}, note = {Publisher: British Medical Journal Publishing Group Section: Research}, pages = {h6467}, }
@article{zhang_ten_2014, title = {Ten {Simple} {Rules} for {Writing} {Research} {Papers}}, volume = {10}, issn = {1553-7358}, url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1003453}, doi = {10.1371/journal.pcbi.1003453}, language = {en}, number = {1}, urldate = {2021-11-08}, journal = {PLOS Computational Biology}, author = {Zhang, Weixiong}, month = jan, year = {2014}, note = {Publisher: Public Library of Science}, keywords = {Biologists, Careers, Careers in research, DNA structure, Language, Life cycles, Research design, Scientists}, pages = {e1003453}, }
@article{noauthor_how_2013, title = {How science goes wrong}, issn = {0013-0613}, url = {https://www.economist.com/leaders/2013/10/21/how-science-goes-wrong}, abstract = {Scientific research has changed the world. Now it needs to change itself}, urldate = {2021-11-08}, journal = {The Economist}, month = oct, year = {2013}, }
@article{john_measuring_2012, title = {Measuring the {Prevalence} of {Questionable} {Research} {Practices} {With} {Incentives} for {Truth} {Telling}}, volume = {23}, issn = {0956-7976}, url = {https://doi.org/10.1177/0956797611430953}, doi = {10.1177/0956797611430953}, abstract = {Cases of clear scientific misconduct have received significant media attention recently, but less flagrantly questionable research practices may be more prevalent and, ultimately, more damaging to the academic enterprise. Using an anonymous elicitation format supplemented by incentives for honest reporting, we surveyed over 2,000 psychologists about their involvement in questionable research practices. The impact of truth-telling incentives on self-admissions of questionable research practices was positive, and this impact was greater for practices that respondents judged to be less defensible. Combining three different estimation methods, we found that the percentage of respondents who have engaged in questionable practices was surprisingly high. This finding suggests that some questionable practices may constitute the prevailing research norm.}, language = {en}, number = {5}, urldate = {2021-11-08}, journal = {Psychological Science}, author = {John, Leslie K. and Loewenstein, George and Prelec, Drazen}, month = may, year = {2012}, note = {Publisher: SAGE Publications Inc}, keywords = {disclosure, judgment, methodology, professional standards}, pages = {524--532}, }
@article{bakker_rules_2012, title = {The {Rules} of the {Game} {Called} {Psychological} {Science}}, volume = {7}, issn = {1745-6916}, url = {https://doi.org/10.1177/1745691612459060}, doi = {10.1177/1745691612459060}, abstract = {If science were a game, a dominant rule would probably be to collect results that are statistically significant. Several reviews of the psychological literature have shown that around 96\% of papers involving the use of null hypothesis significance testing report significant outcomes for their main results but that the typical studies are insufficiently powerful for such a track record. We explain this paradox by showing that the use of several small underpowered samples often represents a more efficient research strategy (in terms of finding p {\textless} .05) than does the use of one larger (more powerful) sample. Publication bias and the most efficient strategy lead to inflated effects and high rates of false positives, especially when researchers also resorted to questionable research practices, such as adding participants after intermediate testing. We provide simulations that highlight the severity of such biases in meta-analyses. We consider 13 meta-analyses covering 281 primary studies in various fields of psychology and find indications of biases and/or an excess of significant results in seven. These results highlight the need for sufficiently powerful replications and changes in journal policies.}, language = {en}, number = {6}, urldate = {2021-11-08}, journal = {Perspectives on Psychological Science}, author = {Bakker, Marjan and van Dijk, Annette and Wicherts, Jelte M.}, month = nov, year = {2012}, note = {Publisher: SAGE Publications Inc}, keywords = {false positives, power, publication bias, replication, sample size}, pages = {543--554}, }
@article{simmons_false-positive_2011, title = {False-{Positive} {Psychology}: {Undisclosed} {Flexibility} in {Data} {Collection} and {Analysis} {Allows} {Presenting} {Anything} as {Significant}}, volume = {22}, issn = {0956-7976}, shorttitle = {False-{Positive} {Psychology}}, url = {https://doi.org/10.1177/0956797611417632}, doi = {10.1177/0956797611417632}, abstract = {In this article, we accomplish two things. First, we show that despite empirical psychologists’ nominal endorsement of a low rate of false-positive findings (≤ .05), flexibility in data collection, analysis, and reporting dramatically increases actual false-positive rates. In many cases, a researcher is more likely to falsely find evidence that an effect exists than to correctly find evidence that it does not. We present computer simulations and a pair of actual experiments that demonstrate how unacceptably easy it is to accumulate (and report) statistically significant evidence for a false hypothesis. Second, we suggest a simple, low-cost, and straightforwardly effective disclosure-based solution to this problem. The solution involves six concrete requirements for authors and four guidelines for reviewers, all of which impose a minimal burden on the publication process.}, language = {en}, number = {11}, urldate = {2021-11-08}, journal = {Psychological Science}, author = {Simmons, Joseph P. and Nelson, Leif D. and Simonsohn, Uri}, month = nov, year = {2011}, note = {Publisher: SAGE Publications Inc}, keywords = {disclosure, methodology, motivated reasoning, publication}, pages = {1359--1366}, }
@article{wicherts_willingness_2011, title = {Willingness to {Share} {Research} {Data} {Is} {Related} to the {Strength} of the {Evidence} and the {Quality} of {Reporting} of {Statistical} {Results}}, volume = {6}, issn = {1932-6203}, url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0026828}, doi = {10.1371/journal.pone.0026828}, abstract = {Background The widespread reluctance to share published research data is often hypothesized to be due to the authors' fear that reanalysis may expose errors in their work or may produce conclusions that contradict their own. However, these hypotheses have not previously been studied systematically. Methods and Findings We related the reluctance to share research data for reanalysis to 1148 statistically significant results reported in 49 papers published in two major psychology journals. We found the reluctance to share data to be associated with weaker evidence (against the null hypothesis of no effect) and a higher prevalence of apparent errors in the reporting of statistical results. The unwillingness to share data was particularly clear when reporting errors had a bearing on statistical significance. Conclusions Our findings on the basis of psychological papers suggest that statistical results are particularly hard to verify when reanalysis is more likely to lead to contrasting conclusions. This highlights the importance of establishing mandatory data archiving policies.}, language = {en}, number = {11}, urldate = {2021-11-08}, journal = {PLOS ONE}, author = {Wicherts, Jelte M. and Bakker, Marjan and Molenaar, Dylan}, month = nov, year = {2011}, note = {Publisher: Public Library of Science}, keywords = {Data management, Experimental design, Medical journals, Psychologists, Psychology, Statistical data, Statistical distributions, Test statistics}, pages = {e26828}, }
@article{fanelli_negative_2011, title = {Negative results are disappearing from most disciplines and countries}, volume = {90}, issn = {0138-9130, 1588-2861}, url = {https://akjournals.com/view/journals/11192/90/3/article-p891.xml}, doi = {10.1007/s11192-011-0494-7}, abstract = {Abstract Concerns that the growing competition for funding and citations might distort science are frequently discussed, but have not been verified directly. Of the hypothesized problems, perhaps the most worrying is a worsening of positive-outcome bias. A system that disfavours negative results not only distorts the scientific literature directly, but might also discourage high-risk projects and pressure scientists to fabricate and falsify their data. This study analysed over 4,600 papers published in all disciplines between 1990 and 2007, measuring the frequency of papers that, having declared to have “tested” a hypothesis, reported a positive support for it. The overall frequency of positive supports has grown by over 22\% between 1990 and 2007, with significant differences between disciplines and countries. The increase was stronger in the social and some biomedical disciplines. The United States had published, over the years, significantly fewer positive results than Asian countries (and particularly Japan) but more than European countries (and in particular the United Kingdom). Methodological artefacts cannot explain away these patterns, which support the hypotheses that research is becoming less pioneering and/or that the objectivity with which results are produced and published is decreasing.}, language = {en\_US}, number = {3}, urldate = {2021-11-08}, journal = {Scientometrics}, author = {Fanelli, Daniele}, month = sep, year = {2011}, note = {Publisher: Akadémiai Kiadó, co-published with Springer Science+Business Media B.V., Formerly Kluwer Academic Publishers B.V. Section: Scientometrics}, pages = {891--904}, }
@article{bourne_ten_2006, title = {Ten {Simple} {Rules} for {Reviewers}}, volume = {2}, issn = {1553-7358}, url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.0020110}, doi = {10.1371/journal.pcbi.0020110}, language = {en}, number = {9}, urldate = {2021-11-08}, journal = {PLOS Computational Biology}, author = {Bourne, Philip E. and Korngreen, Alon}, month = sep, year = {2006}, note = {Publisher: Public Library of Science}, keywords = {Careers, Careers in research, Citation analysis, Computational biology, Conflicts of interest, Research reporting guidelines, Scientific misconduct, Scientific publishing}, pages = {e110}, }
@incollection{marin_17_1999, address = {San Diego}, series = {Cognition and {Perception}}, title = {17 - {Neurological} {Aspects} of {Music} {Perception} and {Performance}}, isbn = {978-0-12-213564-4}, url = {https://www.sciencedirect.com/science/article/pii/B9780122135644500184}, abstract = {This chapter explores the musical aspects of music perception and performance. The chapter suggests that any article dealing with the neurology and neuropsychology of music should include two levels of inquiry: (1) description of the clinical deficits in music perception or performance resulting from localized or diffuse damage to the nervous system; (2) analysis of normal and abnormal psychological and physiological functions. The aim is to determine the principles and modes by which the human brain processes, codifies, stores, and produces music. The resulting hypotheses, part of the domain now called cognitive neuroscience, can best guide the empirical investigation and analysis of clinical deficits in music perception and performance, and the investigation of anatomical-functional correlations via noninvasive brain imaging methods in humans. The clinical characteristics of disorders of musical function are described. Despite the awareness, progress in the field of the neurology of music has been slow..}, language = {en}, urldate = {2021-11-10}, booktitle = {The {Psychology} of {Music} ({Second} {Edition})}, publisher = {Academic Press}, author = {Marin, Oscar S. M. and Perry, David W.}, editor = {Deutsch, Diana}, month = jan, year = {1999}, doi = {10.1016/B978-012213564-4/50018-4}, pages = {653--724}, }
@book{poldrack_statistical_nodate, title = {Statistical {Thinking} for the 21st {Century}}, url = {https://statsthinking21.github.io/statsthinking21-core-site/index.html}, abstract = {A book about statistics.}, urldate = {2022-02-08}, author = {Poldrack, Russell A.}, year = {2019}, }
@misc{noauthor_intersectional_nodate, title = {Intersectional {Inequalities}}, url = {https://sciencebias.uni.lu/app/}, urldate = {2022-01-05}, }
@article{gelman_garden_nodate, title = {The garden of forking paths: {Why} multiple comparisons can be a problem, even when there is no “fishing expedition” or “p-hacking” and the research hypothesis was posited ahead of time}, abstract = {Researcher degrees of freedom can lead to a multiple comparisons problem, even in settings where researchers perform only a single analysis on their data. The problem is there can be a large number of potential comparisons when the details of data analysis are highly contingent on data, without the researcher having to perform any conscious procedure of fishing or examining multiple p-values. We discuss in the context of several examples of published papers where data-analysis decisions were theoretically-motivated based on previous literature, but where the details of data selection and analysis were not pre-specified and, as a result, were contingent on data.}, language = {en}, author = {Gelman, Andrew and Loken, Eric}, pages = {17}, }
@article{marsman_bayesian_nodate, title = {A {Bayesian} bird's eye view of ‘{Replications} of important results in social psychology’}, volume = {4}, url = {https://royalsocietypublishing.org/doi/full/10.1098/rsos.160426}, doi = {10.1098/rsos.160426}, abstract = {We applied three Bayesian methods to reanalyse the preregistered contributions to the Social Psychology special issue ‘Replications of Important Results in Social Psychology’ (Nosek \& Lakens. 2014 Registered reports: a method to increase the credibility of published results. Soc. Psychol. 45, 137–141. (doi:10.1027/1864-9335/a000192)). First, individual-experiment Bayesian parameter estimation revealed that for directed effect size measures, only three out of 44 central 95\% credible intervals did not overlap with zero and fell in the expected direction. For undirected effect size measures, only four out of 59 credible intervals contained values greater than 0.10 (10\% of variance explained) and only 19 intervals contained values larger than 0.05. Second, a Bayesian random-effects meta-analysis for all 38 t-tests showed that only one out of the 38 hierarchically estimated credible intervals did not overlap with zero and fell in the expected direction. Third, a Bayes factor hypothesis test was used to quantify the evidence for the null hypothesis against a default one-sided alternative. Only seven out of 60 Bayes factors indicated non-anecdotal support in favour of the alternative hypothesis (BF10 {\textgreater} 3), whereas 51 Bayes factors indicated at least some support for the null hypothesis. We hope that future analyses of replication success will embrace a more inclusive statistical approach by adopting a wider range of complementary techniques.}, number = {1}, urldate = {2021-11-08}, journal = {Royal Society Open Science}, author = {Marsman, Maarten and Schönbrodt, Felix D. and Morey, Richard D. and Yao, Yuling and Gelman, Andrew and Wagenmakers, Eric-Jan}, note = {Publisher: Royal Society}, keywords = {Bayes factor, credible interval, evidence, preregistration, reproducibility}, pages = {160426}, }
@techreport{noauthor_metaarxiv_nodate, title = {Observing {Many} {Researchers} {Using} the {Same} {Data} and {Hypothesis} {Reveals} a {Hidden} {Universe} of {Uncertainty}}, url = {https://osf.io/preprints/metaarxiv/cd5j9/}, language = {en-us}, urldate = {2021-11-08}, institution = {MetaArXiv}, note = {type: article}, }
@article{smaldino_natural_nodate, title = {The natural selection of bad science}, volume = {3}, url = {https://royalsocietypublishing.org/doi/full/10.1098/rsos.160384}, doi = {10.1098/rsos.160384}, abstract = {Poor research design and data analysis encourage false-positive findings. Such poor methods persist despite perennial calls for improvement, suggesting that they result from something more than just misunderstanding. The persistence of poor methods results partly from incentives that favour them, leading to the natural selection of bad science. This dynamic requires no conscious strategizing—no deliberate cheating nor loafing—by scientists, only that publication is a principal factor for career advancement. Some normative methods of analysis have almost certainly been selected to further publication instead of discovery. In order to improve the culture of science, a shift must be made away from correcting misunderstandings and towards rewarding understanding. We support this argument with empirical evidence and computational modelling. We first present a 60-year meta-analysis of statistical power in the behavioural sciences and show that power has not improved despite repeated demonstrations of the necessity of increasing power. To demonstrate the logical consequences of structural incentives, we then present a dynamic model of scientific communities in which competing laboratories investigate novel or previously published hypotheses using culturally transmitted research methods. As in the real world, successful labs produce more ‘progeny,’ such that their methods are more often copied and their students are more likely to start labs of their own. Selection for high output leads to poorer methods and increasingly high false discovery rates. We additionally show that replication slows but does not stop the process of methodological deterioration. Improving the quality of research requires change at the institutional level.}, number = {9}, urldate = {2021-11-08}, journal = {Royal Society Open Science}, author = {Smaldino, Paul E. and McElreath, Richard}, note = {Publisher: Royal Society}, keywords = {Campbell’s Law, cultural evolution, incentives, metascience, replication, statistical power}, pages = {160384}, }
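For completeness, a short sketch of consuming entries like those above. The regular expressions are naive and assume well-formed, single-line fields as in this feed; a dedicated BibTeX parser would be more robust in general. The inline sample reuses two entries from the list; $bibtex would normally hold the full feed fetched earlier.

<?php
// Naive sketch: extract the entry type, citation key, and title from each
// BibTeX entry. Assumes well-formed entries like those in this list.
$bibtex = <<<'BIB'
@article{crameri_misuse_2020, title = {The misuse of colour in science communication}, journal = {Nature Communications}, year = {2020}, }
@article{carey_ten_2020, title = {Ten simple rules for reading a scientific paper}, journal = {PLOS Computational Biology}, year = {2020}, }
BIB;
preg_match_all('/@(\w+)\{([^,]+),.*?title = \{(.+?)\},/s', $bibtex, $m, PREG_SET_ORDER);
foreach ($m as $entry) {
    // e.g. "crameri_misuse_2020 (article): The misuse of colour in science communication"
    echo $entry[2], " (", $entry[1], "): ", str_replace(['{', '}'], '', $entry[3]), "\n";
}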