Evaluating Large Language Models Trained on Code. Chen, M., Tworek, J., Jun, H., Yuan, Q., Pinto, H. P. d. O., Kaplan, J., Edwards, H., Burda, Y., Joseph, N., Brockman, G., Ray, A., Puri, R., Krueger, G., Petrov, M., Khlaaf, H., Sastry, G., Mishkin, P., Chan, B., Gray, S., Ryder, N., Pavlov, M., Power, A., Kaiser, L., Bavarian, M., Winter, C., Tillet, P., Such, F. P., Cummings, D., Plappert, M., Chantzis, F., Barnes, E., Herbert-Voss, A., Guss, W. H., Nichol, A., Paino, A., Tezak, N., Tang, J., Babuschkin, I., Balaji, S., Jain, S., Saunders, W., Hesse, C., Carr, A. N., Leike, J., Achiam, J., Misra, V., Morikawa, E., Radford, A., Knight, M., Brundage, M., Murati, M., Mayer, K., Welinder, P., McGrew, B., Amodei, D., McCandlish, S., Sutskever, I., & Zaremba, W. arXiv:2107.03374 [cs], July, 2021. arXiv: 2107.03374. [Paper] [abstract] [bibtex] We introduce Codex, a GPT language model fine-tuned on publicly available code from GitHub, and study its Python code-writing capabilities. A distinct production version of Codex powers GitHub Copilot. On HumanEval, a new evaluation set we release to measure functional correctness for synthesizing programs from docstrings, our model solves 28.8% of the problems, while GPT-3 solves 0% and GPT-J solves 11.4%. Furthermore, we find that repeated sampling from the model is a surprisingly effective strategy for producing working solutions to difficult prompts. Using this method, we solve 70.2% of our problems with 100 samples per problem. Careful investigation of our model reveals its limitations, including difficulty with docstrings describing long chains of operations and with binding operations to variables. Finally, we discuss the potential broader impacts of deploying powerful code generation technologies, covering safety, security, and economics.
@misc{chen_evaluating_2021,
	title = {Evaluating Large Language Models Trained on Code},
	author = {Chen, Mark and Tworek, Jerry and Jun, Heewoo and Yuan, Qiming and Pinto, Henrique Ponde de Oliveira and Kaplan, Jared and Edwards, Harri and Burda, Yuri and Joseph, Nicholas and Brockman, Greg and Ray, Alex and Puri, Raul and Krueger, Gretchen and Petrov, Michael and Khlaaf, Heidy and Sastry, Girish and Mishkin, Pamela and Chan, Brooke and Gray, Scott and Ryder, Nick and Pavlov, Mikhail and Power, Alethea and Kaiser, Lukasz and Bavarian, Mohammad and Winter, Clemens and Tillet, Philippe and Such, Felipe Petroski and Cummings, Dave and Plappert, Matthias and Chantzis, Fotios and Barnes, Elizabeth and Herbert-Voss, Ariel and Guss, William Hebgen and Nichol, Alex and Paino, Alex and Tezak, Nikolas and Tang, Jie and Babuschkin, Igor and Balaji, Suchir and Jain, Shantanu and Saunders, William and Hesse, Christopher and Carr, Andrew N. and Leike, Jan and Achiam, Josh and Misra, Vedant and Morikawa, Evan and Radford, Alec and Knight, Matthew and Brundage, Miles and Murati, Mira and Mayer, Katie and Welinder, Peter and McGrew, Bob and Amodei, Dario and McCandlish, Sam and Sutskever, Ilya and Zaremba, Wojciech},
	year = {2021},
	month = jul,
	eprint = {2107.03374},
	archiveprefix = {arXiv},
	primaryclass = {cs.LG},
	doi = {10.48550/arXiv.2107.03374},
	url = {http://arxiv.org/abs/2107.03374},
	urldate = {2021-08-04},
	abstract = {We introduce Codex, a GPT language model fine-tuned on publicly available code from GitHub, and study its Python code-writing capabilities. A distinct production version of Codex powers GitHub Copilot. On HumanEval, a new evaluation set we release to measure functional correctness for synthesizing programs from docstrings, our model solves 28.8\% of the problems, while GPT-3 solves 0\% and GPT-J solves 11.4\%. Furthermore, we find that repeated sampling from the model is a surprisingly effective strategy for producing working solutions to difficult prompts. Using this method, we solve 70.2\% of our problems with 100 samples per problem. Careful investigation of our model reveals its limitations, including difficulty with docstrings describing long chains of operations and with binding operations to variables. Finally, we discuss the potential broader impacts of deploying powerful code generation technologies, covering safety, security, and economics.},
	keywords = {Computer Science - Machine Learning},
}
Downloads: 0
{"_id":"unCWpN5o8Lj66muFy","bibbaseid":"chen-tworek-jun-yuan-pinto-kaplan-edwards-burda-etal-evaluatinglargelanguagemodelstrainedoncode-2021","author_short":["Chen, M.","Tworek, J.","Jun, H.","Yuan, Q.","Pinto, H. P. d. O.","Kaplan, J.","Edwards, H.","Burda, Y.","Joseph, N.","Brockman, G.","Ray, A.","Puri, R.","Krueger, G.","Petrov, M.","Khlaaf, H.","Sastry, G.","Mishkin, P.","Chan, B.","Gray, S.","Ryder, N.","Pavlov, M.","Power, A.","Kaiser, L.","Bavarian, M.","Winter, C.","Tillet, P.","Such, F. P.","Cummings, D.","Plappert, M.","Chantzis, F.","Barnes, E.","Herbert-Voss, A.","Guss, W. H.","Nichol, A.","Paino, A.","Tezak, N.","Tang, J.","Babuschkin, I.","Balaji, S.","Jain, S.","Saunders, W.","Hesse, C.","Carr, A. N.","Leike, J.","Achiam, J.","Misra, V.","Morikawa, E.","Radford, A.","Knight, M.","Brundage, M.","Murati, M.","Mayer, K.","Welinder, P.","McGrew, B.","Amodei, D.","McCandlish, S.","Sutskever, I.","Zaremba, W."],"bibdata":{"bibtype":"article","type":"article","title":"Evaluating Large Language Models Trained on Code","url":"http://arxiv.org/abs/2107.03374","abstract":"We introduce Codex, a GPT language model fine-tuned on publicly available code from GitHub, and study its Python code-writing capabilities. A distinct production version of Codex powers GitHub Copilot. On HumanEval, a new evaluation set we release to measure functional correctness for synthesizing programs from docstrings, our model solves 28.8% of the problems, while GPT-3 solves 0% and GPT-J solves 11.4%. Furthermore, we find that repeated sampling from the model is a surprisingly effective strategy for producing working solutions to difficult prompts. Using this method, we solve 70.2% of our problems with 100 samples per problem. Careful investigation of our model reveals its limitations, including difficulty with docstrings describing long chains of operations and with binding operations to variables. 
Finally, we discuss the potential broader impacts of deploying powerful code generation technologies, covering safety, security, and economics.","urldate":"2021-08-04","journal":"arXiv:2107.03374 [cs]","author":[{"propositions":[],"lastnames":["Chen"],"firstnames":["Mark"],"suffixes":[]},{"propositions":[],"lastnames":["Tworek"],"firstnames":["Jerry"],"suffixes":[]},{"propositions":[],"lastnames":["Jun"],"firstnames":["Heewoo"],"suffixes":[]},{"propositions":[],"lastnames":["Yuan"],"firstnames":["Qiming"],"suffixes":[]},{"propositions":[],"lastnames":["Pinto"],"firstnames":["Henrique","Ponde","de","Oliveira"],"suffixes":[]},{"propositions":[],"lastnames":["Kaplan"],"firstnames":["Jared"],"suffixes":[]},{"propositions":[],"lastnames":["Edwards"],"firstnames":["Harri"],"suffixes":[]},{"propositions":[],"lastnames":["Burda"],"firstnames":["Yuri"],"suffixes":[]},{"propositions":[],"lastnames":["Joseph"],"firstnames":["Nicholas"],"suffixes":[]},{"propositions":[],"lastnames":["Brockman"],"firstnames":["Greg"],"suffixes":[]},{"propositions":[],"lastnames":["Ray"],"firstnames":["Alex"],"suffixes":[]},{"propositions":[],"lastnames":["Puri"],"firstnames":["Raul"],"suffixes":[]},{"propositions":[],"lastnames":["Krueger"],"firstnames":["Gretchen"],"suffixes":[]},{"propositions":[],"lastnames":["Petrov"],"firstnames":["Michael"],"suffixes":[]},{"propositions":[],"lastnames":["Khlaaf"],"firstnames":["Heidy"],"suffixes":[]},{"propositions":[],"lastnames":["Sastry"],"firstnames":["Girish"],"suffixes":[]},{"propositions":[],"lastnames":["Mishkin"],"firstnames":["Pamela"],"suffixes":[]},{"propositions":[],"lastnames":["Chan"],"firstnames":["Brooke"],"suffixes":[]},{"propositions":[],"lastnames":["Gray"],"firstnames":["Scott"],"suffixes":[]},{"propositions":[],"lastnames":["Ryder"],"firstnames":["Nick"],"suffixes":[]},{"propositions":[],"lastnames":["Pavlov"],"firstnames":["Mikhail"],"suffixes":[]},{"propositions":[],"lastnames":["Power"],"firstnames":["Alethea"],"suffixes":[]},{"pro
positions":[],"lastnames":["Kaiser"],"firstnames":["Lukasz"],"suffixes":[]},{"propositions":[],"lastnames":["Bavarian"],"firstnames":["Mohammad"],"suffixes":[]},{"propositions":[],"lastnames":["Winter"],"firstnames":["Clemens"],"suffixes":[]},{"propositions":[],"lastnames":["Tillet"],"firstnames":["Philippe"],"suffixes":[]},{"propositions":[],"lastnames":["Such"],"firstnames":["Felipe","Petroski"],"suffixes":[]},{"propositions":[],"lastnames":["Cummings"],"firstnames":["Dave"],"suffixes":[]},{"propositions":[],"lastnames":["Plappert"],"firstnames":["Matthias"],"suffixes":[]},{"propositions":[],"lastnames":["Chantzis"],"firstnames":["Fotios"],"suffixes":[]},{"propositions":[],"lastnames":["Barnes"],"firstnames":["Elizabeth"],"suffixes":[]},{"propositions":[],"lastnames":["Herbert-Voss"],"firstnames":["Ariel"],"suffixes":[]},{"propositions":[],"lastnames":["Guss"],"firstnames":["William","Hebgen"],"suffixes":[]},{"propositions":[],"lastnames":["Nichol"],"firstnames":["Alex"],"suffixes":[]},{"propositions":[],"lastnames":["Paino"],"firstnames":["Alex"],"suffixes":[]},{"propositions":[],"lastnames":["Tezak"],"firstnames":["Nikolas"],"suffixes":[]},{"propositions":[],"lastnames":["Tang"],"firstnames":["Jie"],"suffixes":[]},{"propositions":[],"lastnames":["Babuschkin"],"firstnames":["Igor"],"suffixes":[]},{"propositions":[],"lastnames":["Balaji"],"firstnames":["Suchir"],"suffixes":[]},{"propositions":[],"lastnames":["Jain"],"firstnames":["Shantanu"],"suffixes":[]},{"propositions":[],"lastnames":["Saunders"],"firstnames":["William"],"suffixes":[]},{"propositions":[],"lastnames":["Hesse"],"firstnames":["Christopher"],"suffixes":[]},{"propositions":[],"lastnames":["Carr"],"firstnames":["Andrew","N."],"suffixes":[]},{"propositions":[],"lastnames":["Leike"],"firstnames":["Jan"],"suffixes":[]},{"propositions":[],"lastnames":["Achiam"],"firstnames":["Josh"],"suffixes":[]},{"propositions":[],"lastnames":["Misra"],"firstnames":["Vedant"],"suffixes":[]},{"propositions":[],"lastname
s":["Morikawa"],"firstnames":["Evan"],"suffixes":[]},{"propositions":[],"lastnames":["Radford"],"firstnames":["Alec"],"suffixes":[]},{"propositions":[],"lastnames":["Knight"],"firstnames":["Matthew"],"suffixes":[]},{"propositions":[],"lastnames":["Brundage"],"firstnames":["Miles"],"suffixes":[]},{"propositions":[],"lastnames":["Murati"],"firstnames":["Mira"],"suffixes":[]},{"propositions":[],"lastnames":["Mayer"],"firstnames":["Katie"],"suffixes":[]},{"propositions":[],"lastnames":["Welinder"],"firstnames":["Peter"],"suffixes":[]},{"propositions":[],"lastnames":["McGrew"],"firstnames":["Bob"],"suffixes":[]},{"propositions":[],"lastnames":["Amodei"],"firstnames":["Dario"],"suffixes":[]},{"propositions":[],"lastnames":["McCandlish"],"firstnames":["Sam"],"suffixes":[]},{"propositions":[],"lastnames":["Sutskever"],"firstnames":["Ilya"],"suffixes":[]},{"propositions":[],"lastnames":["Zaremba"],"firstnames":["Wojciech"],"suffixes":[]}],"month":"July","year":"2021","note":"arXiv: 2107.03374","keywords":"Computer Science - Machine Learning","bibtex":"@article{chen_evaluating_2021,\n\ttitle = {Evaluating {Large} {Language} {Models} {Trained} on {Code}},\n\turl = {http://arxiv.org/abs/2107.03374},\n\tabstract = {We introduce Codex, a GPT language model fine-tuned on publicly available code from GitHub, and study its Python code-writing capabilities. A distinct production version of Codex powers GitHub Copilot. On HumanEval, a new evaluation set we release to measure functional correctness for synthesizing programs from docstrings, our model solves 28.8\\% of the problems, while GPT-3 solves 0\\% and GPT-J solves 11.4\\%. Furthermore, we find that repeated sampling from the model is a surprisingly effective strategy for producing working solutions to difficult prompts. Using this method, we solve 70.2\\% of our problems with 100 samples per problem. 
Careful investigation of our model reveals its limitations, including difficulty with docstrings describing long chains of operations and with binding operations to variables. Finally, we discuss the potential broader impacts of deploying powerful code generation technologies, covering safety, security, and economics.},\n\turldate = {2021-08-04},\n\tjournal = {arXiv:2107.03374 [cs]},\n\tauthor = {Chen, Mark and Tworek, Jerry and Jun, Heewoo and Yuan, Qiming and Pinto, Henrique Ponde de Oliveira and Kaplan, Jared and Edwards, Harri and Burda, Yuri and Joseph, Nicholas and Brockman, Greg and Ray, Alex and Puri, Raul and Krueger, Gretchen and Petrov, Michael and Khlaaf, Heidy and Sastry, Girish and Mishkin, Pamela and Chan, Brooke and Gray, Scott and Ryder, Nick and Pavlov, Mikhail and Power, Alethea and Kaiser, Lukasz and Bavarian, Mohammad and Winter, Clemens and Tillet, Philippe and Such, Felipe Petroski and Cummings, Dave and Plappert, Matthias and Chantzis, Fotios and Barnes, Elizabeth and Herbert-Voss, Ariel and Guss, William Hebgen and Nichol, Alex and Paino, Alex and Tezak, Nikolas and Tang, Jie and Babuschkin, Igor and Balaji, Suchir and Jain, Shantanu and Saunders, William and Hesse, Christopher and Carr, Andrew N. and Leike, Jan and Achiam, Josh and Misra, Vedant and Morikawa, Evan and Radford, Alec and Knight, Matthew and Brundage, Miles and Murati, Mira and Mayer, Katie and Welinder, Peter and McGrew, Bob and Amodei, Dario and McCandlish, Sam and Sutskever, Ilya and Zaremba, Wojciech},\n\tmonth = jul,\n\tyear = {2021},\n\tnote = {arXiv: 2107.03374},\n\tkeywords = {Computer Science - Machine Learning},\n}\n\n","author_short":["Chen, M.","Tworek, J.","Jun, H.","Yuan, Q.","Pinto, H. P. d. 
O.","Kaplan, J.","Edwards, H.","Burda, Y.","Joseph, N.","Brockman, G.","Ray, A.","Puri, R.","Krueger, G.","Petrov, M.","Khlaaf, H.","Sastry, G.","Mishkin, P.","Chan, B.","Gray, S.","Ryder, N.","Pavlov, M.","Power, A.","Kaiser, L.","Bavarian, M.","Winter, C.","Tillet, P.","Such, F. P.","Cummings, D.","Plappert, M.","Chantzis, F.","Barnes, E.","Herbert-Voss, A.","Guss, W. H.","Nichol, A.","Paino, A.","Tezak, N.","Tang, J.","Babuschkin, I.","Balaji, S.","Jain, S.","Saunders, W.","Hesse, C.","Carr, A. N.","Leike, J.","Achiam, J.","Misra, V.","Morikawa, E.","Radford, A.","Knight, M.","Brundage, M.","Murati, M.","Mayer, K.","Welinder, P.","McGrew, B.","Amodei, D.","McCandlish, S.","Sutskever, I.","Zaremba, W."],"key":"chen_evaluating_2021-1","id":"chen_evaluating_2021-1","bibbaseid":"chen-tworek-jun-yuan-pinto-kaplan-edwards-burda-etal-evaluatinglargelanguagemodelstrainedoncode-2021","role":"author","urls":{"Paper":"http://arxiv.org/abs/2107.03374"},"keyword":["Computer Science - Machine Learning"],"metadata":{"authorlinks":{}},"downloads":0,"html":""},"bibtype":"article","biburl":"https://bibbase.org/zotero/bxt101","dataSources":["Wsv2bQ4jPuc7qme8R"],"keywords":["computer science - machine learning"],"search_terms":["evaluating","large","language","models","trained","code","chen","tworek","jun","yuan","pinto","kaplan","edwards","burda","joseph","brockman","ray","puri","krueger","petrov","khlaaf","sastry","mishkin","chan","gray","ryder","pavlov","power","kaiser","bavarian","winter","tillet","such","cummings","plappert","chantzis","barnes","herbert-voss","guss","nichol","paino","tezak","tang","babuschkin","balaji","jain","saunders","hesse","carr","leike","achiam","misra","morikawa","radford","knight","brundage","murati","mayer","welinder","mcgrew","amodei","mccandlish","sutskever","zaremba"],"title":"Evaluating Large Language Models Trained on Code","year":2021}