<script src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F5201095%2Fitems%3Fkey%3D7qHIy4DrYxGHcP2M7pZXOL4Q%26format%3Dbibtex%26limit%3D100&jsonp=1"></script>
<?php
// Fetch the server-side rendered BibBase publication list and emit it inline.
// NOTE(review): the URL embeds a Zotero API key in plain text — consider moving
// it to a server-side config value rather than hard-coding it here.
$bibbaseUrl = "https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F5201095%2Fitems%3Fkey%3D7qHIy4DrYxGHcP2M7pZXOL4Q%26format%3Dbibtex%26limit%3D100";

// file_get_contents() returns false on failure (network error, HTTP error with
// default stream settings); the original ignored this and would have passed
// false straight to print_r, silently rendering nothing.
$contents = file_get_contents($bibbaseUrl);

if ($contents === false) {
    // Degrade gracefully instead of rendering an empty/blank section.
    echo '<p>Unable to load the publication list right now. Please try again later.</p>';
} else {
    // print_r on a string is just echo with extra indirection; echo is clearer.
    echo $contents;
}
?>
<iframe src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F5201095%2Fitems%3Fkey%3D7qHIy4DrYxGHcP2M7pZXOL4Q%26format%3Dbibtex%26limit%3D100"></iframe>
For more details see the documentation.
To the site owner:
Action required! Mendeley is changing its API. In order to keep using Mendeley with BibBase past April 14th, you need to:
@article{noy_experimental_2023, title = {Experimental evidence on the productivity effects of generative artificial intelligence}, volume = {381}, url = {https://www.science.org/doi/10.1126/science.adh2586}, doi = {10.1126/science.adh2586}, abstract = {We examined the productivity effects of a generative artificial intelligence (AI) technology, the assistive chatbot ChatGPT, in the context of midlevel professional writing tasks. In a preregistered online experiment, we assigned occupation-specific, incentivized writing tasks to 453 college-educated professionals and randomly exposed half of them to ChatGPT. Our results show that ChatGPT substantially raised productivity: The average time taken decreased by 40\% and output quality rose by 18\%. Inequality between workers decreased, and concern and excitement about AI temporarily rose. Workers exposed to ChatGPT during the experiment were 2 times as likely to report using it in their real job 2 weeks after the experiment and 1.6 times as likely 2 months after the experiment.}, number = {6654}, urldate = {2023-09-26}, journal = {Science}, author = {Noy, Shakked and Zhang, Whitney}, month = jul, year = {2023}, note = {Publisher: American Association for the Advancement of Science}, pages = {187--192}, }
@misc{fan_bibliometric_2023, title = {A {Bibliometric} {Review} of {Large} {Language} {Models} {Research} from 2017 to 2023}, url = {http://arxiv.org/abs/2304.02020}, doi = {10.48550/arXiv.2304.02020}, abstract = {Large language models (LLMs) are a class of language models that have demonstrated outstanding performance across a range of natural language processing (NLP) tasks and have become a highly sought-after research area, because of their ability to generate human-like language and their potential to revolutionize science and technology. In this study, we conduct bibliometric and discourse analyses of scholarly literature on LLMs. Synthesizing over 5,000 publications, this paper serves as a roadmap for researchers, practitioners, and policymakers to navigate the current landscape of LLMs research. We present the research trends from 2017 to early 2023, identifying patterns in research paradigms and collaborations. We start with analyzing the core algorithm developments and NLP tasks that are fundamental in LLMs research. We then investigate the applications of LLMs in various fields and domains including medicine, engineering, social science, and humanities. Our review also reveals the dynamic, fast-paced evolution of LLMs research. Overall, this paper offers valuable insights into the current state, impact, and potential of LLMs research and its applications.}, urldate = {2023-09-24}, publisher = {arXiv}, author = {Fan, Lizhou and Li, Lingyao and Ma, Zihui and Lee, Sanggyu and Yu, Huizi and Hemphill, Libby}, month = apr, year = {2023}, note = {arXiv:2304.02020 [cs]}, keywords = {Computer Science - Computation and Language, Computer Science - Computers and Society, Computer Science - Digital Libraries, Computer Science - Social and Information Networks}, }
@techreport{unesco_chatgpt_2023, title = {{ChatGPT} e {Inteligencia} {Artificial} en la educación superior: {Guía} de inicio rápido}, institution = {UNESCO}, author = {UNESCO}, year = {2023}, keywords = {/unread, ⛔ No INSPIRE recid found}, }
@article{sohail_decoding_2023, title = {Decoding {ChatGPT}: {A} taxonomy of existing research, current challenges, and possible future directions}, volume = {35}, issn = {1319-1578}, shorttitle = {Decoding {ChatGPT}}, url = {https://www.sciencedirect.com/science/article/pii/S131915782300229X}, doi = {10/gssdch}, abstract = {Chat Generative Pre-trained Transformer (ChatGPT) has gained significant interest and attention since its launch in November 2022. It has shown impressive performance in various domains, including passing exams and creative writing. However, challenges and concerns related to biases and trust persist. In this work, we present a comprehensive review of over 100 Scopus-indexed publications on ChatGPT, aiming to provide a taxonomy of ChatGPT research and explore its applications. We critically analyze the existing literature, identifying common approaches employed in the studies. Additionally, we investigate diverse application areas where ChatGPT has found utility, such as healthcare, marketing and financial services, software engineering, academic and scientific writing, research and education, environmental science, and natural language processing. Through examining these applications, we gain valuable insights into the potential of ChatGPT in addressing real-world challenges. We also discuss crucial issues related to ChatGPT, including biases and trustworthiness, emphasizing the need for further research and development in these areas. Furthermore, we identify potential future directions for ChatGPT research, proposing solutions to current challenges and speculating on expected advancements. 
By fully leveraging the capabilities of ChatGPT, we can unlock its potential across various domains, leading to advancements in conversational AI and transformative impacts in society.}, number = {8}, urldate = {2023-09-24}, journal = {Journal of King Saud University - Computer and Information Sciences}, author = {Sohail, Shahab Saquib and Farhat, Faiza and Himeur, Yassine and Nadeem, Mohammad and Madsen, Dag Øivind and Singh, Yashbir and Atalla, Shadi and Mansoor, Wathiq}, month = sep, year = {2023}, note = {0 citations (Crossref) [2023-09-23]}, keywords = {/unread, AI Generated Content (AIGC), ChatGPT, Generative Pre-trained Transformer (GPT), Large language models (LLMs), Systematic review, Trustworthy AI, ⛔ No INSPIRE recid found}, pages = {101675}, }
@article{siche_modelo_2023, title = {El modelo de lenguaje basado en inteligencia artificial sensible - {ChatGPT}: {Análisis} bibliométrico y posibles usos en la agricultura y pecuaria}, volume = {14}, copyright = {Derechos de autor 2023 Scientia Agropecuaria}, issn = {2306-6741}, shorttitle = {El modelo de lenguaje basado en inteligencia artificial sensible - {ChatGPT}}, url = {https://revistas.unitru.edu.pe/index.php/scientiaagrop/article/view/5098}, doi = {10/gssc6d}, abstract = {ChatGPT adds to the list of artificial intelligence-based systems designed to perform specific tasks and answer questions by interacting with users (Apple's Siri, Amazon's Alexa, Google's Assistant and Bard, Microsoft's Cortana, IBM's Watson, Bixby from Samsung, among others). ChatGPT works using OpenAI's GPT (Generative Pretrained Transformer) language model and is capable of learning from users' preferences and behavior patterns to customize its response. ChatGPT has the potential to be applied in different fields, including education, journalism, scientific writing, communication, cell biology, and biotechnology, where there is already evidence. The aim of this work was to analyze the possible applications of ChatGPT in the agricultural and livestock industry. First, a scientometric analysis was performed with VosViewer and Bibliometrix (Bliblioshiny). 3 clusters were identified: (a) Main characteristics; (b) learning systems you use; and (c) applications. To the question: What are the main applications in which ChatGTP will revolutionize agriculture (or livestock) in the world? 
ChatGPT responded: (a) in the agricultural field: improvement of agricultural decision-making, optimization of agricultural production, detection and prevention of plant diseases, climate management, and supply chain management; and (b) in the livestock field: improvement of animal health and welfare, optimization of animal production, supply chain management, detection and prevention of zoonotic diseases, and climate management for animal production. ChatGPT does not scientifically support its answer, but from the analysis carried out, we find that there is enough scientific evidence to conclude, in this case, that its answers were correct. While ChatGPT does not necessarily scientifically substantiate its answers, users should. There is a lack of studies on the use of Artificial Intelligence and its relationship with ethics.}, language = {es}, number = {1}, urldate = {2023-09-23}, journal = {Scientia Agropecuaria}, author = {Siche, Raúl and Siche, Nikol}, month = mar, year = {2023}, note = {Number: 1}, keywords = {/unread, chatbot, ⛔ No INSPIRE recid found}, pages = {111--116}, }
@article{crawford_leadership_2023, title = {Leadership is needed for ethical {ChatGPT}: {Character}, assessment, and learning using artificial intelligence ({AI})}, volume = {20}, shorttitle = {Leadership is needed for ethical {ChatGPT}}, url = {https://ro.uow.edu.au/jutlp/vol20/iss3/02}, doi = {10/gspfxq}, number = {3}, journal = {Journal of University Teaching \& Learning Practice}, author = {Crawford, Joseph and Cowling, Michael and Allen, Kelly-Ann}, month = mar, year = {2023}, note = {18 citations (Crossref) [2023-09-23]}, keywords = {/unread, ⛔ No INSPIRE recid found}, }
@article{else_abstracts_2023, title = {Abstracts written by {ChatGPT} fool scientists}, volume = {613}, copyright = {2023 Springer Nature Limited}, url = {https://www.nature.com/articles/d41586-023-00056-7}, doi = {10/js2g}, abstract = {Researchers cannot always differentiate between AI-generated and original abstracts.}, language = {en}, number = {7944}, urldate = {2023-09-23}, journal = {Nature}, author = {Else, Holly}, month = jan, year = {2023}, note = {159 citations (Crossref) [2023-09-23] Bandiera\_abtest: a Cg\_type: News Number: 7944 Publisher: Nature Publishing Group Subject\_term: Publishing, Machine learning, Mathematics and computing}, keywords = {/unread, Machine learning, Mathematics and computing, Publishing, ⛔ No INSPIRE recid found}, pages = {423--423}, }
@article{van_dis_chatgpt_2023, title = {{ChatGPT}: five priorities for research}, volume = {614}, copyright = {2023 Springer Nature Limited}, shorttitle = {{ChatGPT}}, url = {https://www.nature.com/articles/d41586-023-00288-7}, doi = {10/grq6r2}, abstract = {Conversational AI is a game-changer for science. Here’s how to respond.}, language = {en}, number = {7947}, urldate = {2023-09-23}, journal = {Nature}, author = {van Dis, Eva A. M. and Bollen, Johan and Zuidema, Willem and van Rooij, Robert and Bockting, Claudi L.}, month = feb, year = {2023}, note = {280 citations (Crossref) [2023-09-23] Bandiera\_abtest: a Cg\_type: Comment Number: 7947 Publisher: Nature Publishing Group Subject\_term: Computer science, Research management, Publishing, Machine learning}, keywords = {/unread, Computer science, Machine learning, Publishing, Research management, ⛔ No INSPIRE recid found}, pages = {224--226}, }
@misc{vaswani_attention_2023, title = {Attention {Is} {All} {You} {Need}}, url = {http://arxiv.org/abs/1706.03762}, doi = {10.48550/arXiv.1706.03762}, abstract = {The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.}, urldate = {2023-09-23}, publisher = {arXiv}, author = {Vaswani, Ashish and Shazeer, Noam and Parmar, Niki and Uszkoreit, Jakob and Jones, Llion and Gomez, Aidan N. and Kaiser, Lukasz and Polosukhin, Illia}, month = aug, year = {2023}, note = {arXiv:1706.03762 [cs]}, keywords = {/unread, Computer Science - Computation and Language, Computer Science - Machine Learning, ⛔ No INSPIRE recid found}, }
@misc{shanahan_talking_2023, title = {Talking {About} {Large} {Language} {Models}}, url = {http://arxiv.org/abs/2212.03551}, abstract = {Thanks to rapid progress in artificial intelligence, we have entered an era when technology and philosophy intersect in interesting ways. Sitting squarely at the centre of this intersection are large language models (LLMs). The more adept LLMs become at mimicking human language, the more vulnerable we become to anthropomorphism, to seeing the systems in which they are embedded as more human-like than they really are. This trend is amplified by the natural tendency to use philosophically loaded terms, such as "knows", "believes", and "thinks", when describing these systems. To mitigate this trend, this paper advocates the practice of repeatedly stepping back to remind ourselves of how LLMs, and the systems of which they form a part, actually work. The hope is that increased scientific precision will encourage more philosophical nuance in the discourse around artificial intelligence, both within the field and in the public sphere.}, urldate = {2023-09-23}, publisher = {arXiv}, author = {Shanahan, Murray}, month = feb, year = {2023}, note = {arXiv:2212.03551 [cs]}, keywords = {/unread, Computer Science - Computation and Language, Computer Science - Machine Learning, ⛔ No INSPIRE recid found}, }
@book{niemi_ai_2023, title = {{AI} in {Learning}: {Designing} the {Future}}, shorttitle = {{AI} in {Learning}}, url = {https://library.oapen.org/handle/20.500.12657/60151}, abstract = {AI (Artificial Intelligence) is predicted to radically change teaching and learning in both schools and industry causing radical disruption of work. AI can support well-being initiatives and lifelong learning but educational institutions and companies need to take the changing technology into account. Moving towards AI supported by digital tools requires a dramatic shift in the concept of learning, expertise and the businesses built off of it. Based on the latest research on AI and how it is changing learning and education, this book will focus on the enormous opportunities to expand educational settings with AI for learning in and beyond the traditional classroom. This open access book also introduces ethical challenges related to learning and education, while connecting human learning and machine learning. This book will be of use to a variety of readers, including researchers, AI users, companies and policy makers.}, language = {English}, urldate = {2023-09-23}, publisher = {Springer Nature}, editor = {Niemi, Hannele and Pea, Roy D. 
and Lu, Yu}, year = {2023}, doi = {10.1007/978-3-031-09687-7}, note = {Accepted: 2022-12-13T12:35:17Z}, keywords = {/unread, artificial intelligence, bic Book Industry Communication::H Humanities::HP Philosophy::HPM Philosophy of mind, bic Book Industry Communication::J Society \& social sciences::JM Psychology, bic Book Industry Communication::J Society \& social sciences::JN Education, bic Book Industry Communication::U Computing \& information technology::UB Information technology: general issues, bic Book Industry Communication::U Computing \& information technology::UY Computer science::UYQ Artificial intelligence, deep learning, games, human-machine interaction, intelligent digital tools, learning analytics, life-long learning, robotics, simulations, tutoring, virtual learning, well-being, ⛔ No INSPIRE recid found}, }
@misc{liu_pre-train_2021, title = {Pre-train, {Prompt}, and {Predict}: {A} {Systematic} {Survey} of {Prompting} {Methods} in {Natural} {Language} {Processing}}, shorttitle = {Pre-train, {Prompt}, and {Predict}}, url = {http://arxiv.org/abs/2107.13586}, doi = {10.48550/arXiv.2107.13586}, abstract = {This paper surveys and organizes research works in a new paradigm in natural language processing, which we dub "prompt-based learning". Unlike traditional supervised learning, which trains a model to take in an input x and predict an output y as P(y{\textbar}x), prompt-based learning is based on language models that model the probability of text directly. To use these models to perform prediction tasks, the original input x is modified using a template into a textual string prompt x' that has some unfilled slots, and then the language model is used to probabilistically fill the unfilled information to obtain a final string x, from which the final output y can be derived. This framework is powerful and attractive for a number of reasons: it allows the language model to be pre-trained on massive amounts of raw text, and by defining a new prompting function the model is able to perform few-shot or even zero-shot learning, adapting to new scenarios with few or no labeled data. In this paper we introduce the basics of this promising paradigm, describe a unified set of mathematical notations that can cover a wide variety of existing work, and organize existing work along several dimensions, e.g.the choice of pre-trained models, prompts, and tuning strategies. 
To make the field more accessible to interested beginners, we not only make a systematic review of existing works and a highly structured typology of prompt-based concepts, but also release other resources, e.g., a website http://pretrain.nlpedia.ai/ including constantly-updated survey, and paperlist.}, urldate = {2023-09-24}, publisher = {arXiv}, author = {Liu, Pengfei and Yuan, Weizhe and Fu, Jinlan and Jiang, Zhengbao and Hayashi, Hiroaki and Neubig, Graham}, month = jul, year = {2021}, note = {arXiv:2107.13586 [cs]}, keywords = {Computer Science - Artificial Intelligence, Computer Science - Computation and Language, Computer Science - Machine Learning}, }
@misc{noauthor_how_nodate, title = {How will {Language} {Modelers} like {ChatGPT} {Affect} {Occupations} and {Industries}? by {Edward} {W}. {Felten}, {Manav} {Raj}, {Robert} {Seamans} :: {SSRN}}, url = {https://papers.ssrn.com/sol3/papers.cfm?abstract_id=4375268}, urldate = {2023-09-26}, }
@article{noauthor_opinion_nodate, title = {Opinion {Paper}: “{So} what if {ChatGPT} wrote it?” {Multidisciplinary} perspectives on opportunities, challenges and implications of generative conversational {AI} for research, practice and policy - {ScienceDirect}}, url = {https://www.sciencedirect.com/science/article/pii/S0268401223000233?via%3Dihub}, doi = {10.1016/j.ijinfomgt.2023.102642}, urldate = {2023-09-23}, keywords = {/unread, ⛔ No INSPIRE recid found}, }
@misc{noauthor_chatgpt_nodate, title = {{ChatGPT} in {Healthcare}: {A} {Taxonomy} and {Systematic} {Review} {\textbar} {medRxiv}}, url = {https://www.medrxiv.org/content/10.1101/2023.03.30.23287899v1}, urldate = {2023-09-23}, keywords = {/unread, ⛔ No INSPIRE recid found}, }
@misc{noauthor_chatgpt_nodate-1, title = {{ChatGPT} is fun, but not an author {\textbar} {Science}}, url = {https://www.science.org/doi/10.1126/science.adg7879}, urldate = {2023-09-23}, keywords = {/unread, ⛔ No INSPIRE recid found}, }