Mistral 7B. Jiang, A. Q., Sablayrolles, A., Mensch, A., Bamford, C., Chaplot, D. S., de las Casas, D., Bressand, F., Lengyel, G., Lample, G., Saulnier, L., Lavaud, L. R., Lachaux, M.-A., Stock, P., Le Scao, T., Lavril, T., Wang, T., Lacroix, T., & El Sayed, W. October 2023. arXiv:2310.06825 [cs].
Paper: http://arxiv.org/abs/2310.06825 · DOI: 10.48550/arXiv.2310.06825

Abstract: We introduce Mistral 7B v0.1, a 7-billion-parameter language model engineered for superior performance and efficiency. Mistral 7B outperforms Llama 2 13B across all evaluated benchmarks, and Llama 1 34B in reasoning, mathematics, and code generation. Our model leverages grouped-query attention (GQA) for faster inference, coupled with sliding window attention (SWA) to effectively handle sequences of arbitrary length with a reduced inference cost. We also provide a model fine-tuned to follow instructions, Mistral 7B – Instruct, that surpasses the Llama 2 13B – Chat model both on human and automated benchmarks. Our models are released under the Apache 2.0 license.
@misc{jiang_mistral_2023,
title = {Mistral {7B}},
url = {http://arxiv.org/abs/2310.06825},
doi = {10.48550/arXiv.2310.06825},
abstract = {We introduce Mistral 7B v0.1, a 7-billion-parameter language model engineered for superior performance and efficiency. Mistral 7B outperforms Llama 2 13B across all evaluated benchmarks, and Llama 1 34B in reasoning, mathematics, and code generation. Our model leverages grouped-query attention (GQA) for faster inference, coupled with sliding window attention (SWA) to effectively handle sequences of arbitrary length with a reduced inference cost. We also provide a model fine-tuned to follow instructions, Mistral 7B -- Instruct, that surpasses the Llama 2 13B -- Chat model both on human and automated benchmarks. Our models are released under the Apache 2.0 license.},
urldate = {2023-10-16},
publisher = {arXiv},
author = {Jiang, Albert Q. and Sablayrolles, Alexandre and Mensch, Arthur and Bamford, Chris and Chaplot, Devendra Singh and Casas, Diego de las and Bressand, Florian and Lengyel, Gianna and Lample, Guillaume and Saulnier, Lucile and Lavaud, Lélio Renard and Lachaux, Marie-Anne and Stock, Pierre and Scao, Teven Le and Lavril, Thibaut and Wang, Thomas and Lacroix, Timothée and Sayed, William El},
month = oct,
year = {2023},
note = {arXiv:2310.06825 [cs]},
keywords = {Computer Science - Artificial Intelligence, Computer Science - Computation and Language, Computer Science - Machine Learning},
}
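
The abstract names two attention mechanisms, grouped-query attention (GQA) and sliding window attention (SWA). Below is a minimal, self-contained sketch of how the two compose in a single attention step. The head counts (32 query heads sharing 8 KV heads) and the windowed causal mask follow the paper's description; the function names, the scaled-down toy dimensions, and the repeat-interleave trick for sharing KV heads are illustrative choices, not Mistral's released inference code.

import math
import torch
import torch.nn.functional as F

def sliding_window_causal_mask(seq_len: int, window: int) -> torch.Tensor:
    # SWA: position i may attend only to positions j with i - window < j <= i,
    # so per-layer attention cost scales with the window rather than with seq_len.
    i = torch.arange(seq_len).unsqueeze(1)
    j = torch.arange(seq_len).unsqueeze(0)
    allowed = (j <= i) & (j > i - window)
    return torch.zeros(seq_len, seq_len).masked_fill(~allowed, float("-inf"))

def grouped_query_attention(q, k, v, window: int) -> torch.Tensor:
    # GQA: q carries n_heads query heads, while k and v carry only n_kv_heads
    # (which divides n_heads), shrinking the KV cache. Each KV head is repeated
    # to serve n_heads // n_kv_heads query heads.
    batch, n_heads, seq_len, head_dim = q.shape
    n_kv_heads = k.shape[1]
    k = k.repeat_interleave(n_heads // n_kv_heads, dim=1)
    v = v.repeat_interleave(n_heads // n_kv_heads, dim=1)
    scores = (q @ k.transpose(-2, -1)) / math.sqrt(head_dim)
    scores = scores + sliding_window_causal_mask(seq_len, window)
    return F.softmax(scores, dim=-1) @ v

# Toy usage with Mistral-7B-like head counts (dims scaled down from the real
# model, which uses head_dim 128 and a 4096-token window).
q = torch.randn(1, 32, 16, 8)   # 32 query heads
k = torch.randn(1, 8, 16, 8)    # 8 shared KV heads
v = torch.randn(1, 8, 16, 8)
out = grouped_query_attention(q, k, v, window=4)
print(out.shape)                # torch.Size([1, 32, 16, 8])

Although each layer looks back at most `window` tokens, information still propagates across layers: after L layers a token can indirectly draw on roughly L × window earlier positions, which is how the paper handles sequences of arbitrary length at reduced inference cost.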
{"_id":"u3TDrk89ZuDR5JYRN","bibbaseid":"jiang-sablayrolles-mensch-bamford-chaplot-casas-bressand-lengyel-etal-mistral7b-2023","author_short":["Jiang, A. Q.","Sablayrolles, A.","Mensch, A.","Bamford, C.","Chaplot, D. S.","Casas, D. d. l.","Bressand, F.","Lengyel, G.","Lample, G.","Saulnier, L.","Lavaud, L. R.","Lachaux, M.","Stock, P.","Scao, T. L.","Lavril, T.","Wang, T.","Lacroix, T.","Sayed, W. E."],"bibdata":{"bibtype":"misc","type":"misc","title":"Mistral 7B","url":"http://arxiv.org/abs/2310.06825","doi":"10.48550/arXiv.2310.06825","abstract":"We introduce Mistral 7B v0.1, a 7-billion-parameter language model engineered for superior performance and efficiency. Mistral 7B outperforms Llama 2 13B across all evaluated benchmarks, and Llama 1 34B in reasoning, mathematics, and code generation. Our model leverages grouped-query attention (GQA) for faster inference, coupled with sliding window attention (SWA) to effectively handle sequences of arbitrary length with a reduced inference cost. We also provide a model fine-tuned to follow instructions, Mistral 7B – Instruct, that surpasses the Llama 2 13B – Chat model both on human and automated benchmarks. Our models are released under the Apache 2.0 license.","urldate":"2023-10-16","publisher":"arXiv","author":[{"propositions":[],"lastnames":["Jiang"],"firstnames":["Albert","Q."],"suffixes":[]},{"propositions":[],"lastnames":["Sablayrolles"],"firstnames":["Alexandre"],"suffixes":[]},{"propositions":[],"lastnames":["Mensch"],"firstnames":["Arthur"],"suffixes":[]},{"propositions":[],"lastnames":["Bamford"],"firstnames":["Chris"],"suffixes":[]},{"propositions":[],"lastnames":["Chaplot"],"firstnames":["Devendra","Singh"],"suffixes":[]},{"propositions":[],"lastnames":["Casas"],"firstnames":["Diego","de","las"],"suffixes":[]},{"propositions":[],"lastnames":["Bressand"],"firstnames":["Florian"],"suffixes":[]},{"propositions":[],"lastnames":["Lengyel"],"firstnames":["Gianna"],"suffixes":[]},{"propositions":[],"lastnames":["Lample"],"firstnames":["Guillaume"],"suffixes":[]},{"propositions":[],"lastnames":["Saulnier"],"firstnames":["Lucile"],"suffixes":[]},{"propositions":[],"lastnames":["Lavaud"],"firstnames":["Lélio","Renard"],"suffixes":[]},{"propositions":[],"lastnames":["Lachaux"],"firstnames":["Marie-Anne"],"suffixes":[]},{"propositions":[],"lastnames":["Stock"],"firstnames":["Pierre"],"suffixes":[]},{"propositions":[],"lastnames":["Scao"],"firstnames":["Teven","Le"],"suffixes":[]},{"propositions":[],"lastnames":["Lavril"],"firstnames":["Thibaut"],"suffixes":[]},{"propositions":[],"lastnames":["Wang"],"firstnames":["Thomas"],"suffixes":[]},{"propositions":[],"lastnames":["Lacroix"],"firstnames":["Timothée"],"suffixes":[]},{"propositions":[],"lastnames":["Sayed"],"firstnames":["William","El"],"suffixes":[]}],"month":"October","year":"2023","note":"arXiv:2310.06825 [cs]","keywords":"Computer Science - Artificial Intelligence, Computer Science - Computation and Language, Computer Science - Machine Learning","bibtex":"@misc{jiang_mistral_2023,\n\ttitle = {Mistral {7B}},\n\turl = {http://arxiv.org/abs/2310.06825},\n\tdoi = {10.48550/arXiv.2310.06825},\n\tabstract = {We introduce Mistral 7B v0.1, a 7-billion-parameter language model engineered for superior performance and efficiency. Mistral 7B outperforms Llama 2 13B across all evaluated benchmarks, and Llama 1 34B in reasoning, mathematics, and code generation. 
Our model leverages grouped-query attention (GQA) for faster inference, coupled with sliding window attention (SWA) to effectively handle sequences of arbitrary length with a reduced inference cost. We also provide a model fine-tuned to follow instructions, Mistral 7B -- Instruct, that surpasses the Llama 2 13B -- Chat model both on human and automated benchmarks. Our models are released under the Apache 2.0 license.},\n\turldate = {2023-10-16},\n\tpublisher = {arXiv},\n\tauthor = {Jiang, Albert Q. and Sablayrolles, Alexandre and Mensch, Arthur and Bamford, Chris and Chaplot, Devendra Singh and Casas, Diego de las and Bressand, Florian and Lengyel, Gianna and Lample, Guillaume and Saulnier, Lucile and Lavaud, Lélio Renard and Lachaux, Marie-Anne and Stock, Pierre and Scao, Teven Le and Lavril, Thibaut and Wang, Thomas and Lacroix, Timothée and Sayed, William El},\n\tmonth = oct,\n\tyear = {2023},\n\tnote = {arXiv:2310.06825 [cs]},\n\tkeywords = {Computer Science - Artificial Intelligence, Computer Science - Computation and Language, Computer Science - Machine Learning},\n}\n\n\n\n\n\n\n\n\n\n\n\n","author_short":["Jiang, A. Q.","Sablayrolles, A.","Mensch, A.","Bamford, C.","Chaplot, D. S.","Casas, D. d. l.","Bressand, F.","Lengyel, G.","Lample, G.","Saulnier, L.","Lavaud, L. R.","Lachaux, M.","Stock, P.","Scao, T. L.","Lavril, T.","Wang, T.","Lacroix, T.","Sayed, W. E."],"key":"jiang_mistral_2023","id":"jiang_mistral_2023","bibbaseid":"jiang-sablayrolles-mensch-bamford-chaplot-casas-bressand-lengyel-etal-mistral7b-2023","role":"author","urls":{"Paper":"http://arxiv.org/abs/2310.06825"},"keyword":["Computer Science - Artificial Intelligence","Computer Science - Computation and Language","Computer Science - Machine Learning"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"misc","biburl":"https://bibbase.org/zotero/sarveshsoni","dataSources":["taWdMrienBzqHC2tC"],"keywords":["computer science - artificial intelligence","computer science - computation and language","computer science - machine learning"],"search_terms":["mistral","jiang","sablayrolles","mensch","bamford","chaplot","casas","bressand","lengyel","lample","saulnier","lavaud","lachaux","stock","scao","lavril","wang","lacroix","sayed"],"title":"Mistral 7B","year":2023}