LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models. Zheng, Y., Zhang, R., Zhang, J., Ye, Y., Luo, Z., & Ma, Y. March 2024. arXiv:2403.13372 [cs]. doi: 10.48550/arXiv.2403.13372. Paper: http://arxiv.org/abs/2403.13372

Abstract: Efficient fine-tuning is vital for adapting large language models (LLMs) to downstream tasks. However, it requires non-trivial effort to implement these methods on different models. We present LlamaFactory, a unified framework that integrates a suite of cutting-edge efficient training methods. It allows users to flexibly customize the fine-tuning of 100+ LLMs, without the need for coding, through the built-in web UI LlamaBoard. We empirically validate the efficiency and effectiveness of our framework on language modeling and text generation tasks. It has been released at https://github.com/hiyouga/LLaMA-Factory and has already received over 13,000 stars and 1,600 forks.
@misc{zheng_llamafactory_2024,
title = {{LlamaFactory}: {Unified} {Efficient} {Fine}-{Tuning} of 100+ {Language} {Models}},
shorttitle = {{LlamaFactory}},
url = {http://arxiv.org/abs/2403.13372},
doi = {10.48550/arXiv.2403.13372},
abstract = {Efficient fine-tuning is vital for adapting large language models (LLMs) to downstream tasks. However, it requires non-trivial effort to implement these methods on different models. We present LlamaFactory, a unified framework that integrates a suite of cutting-edge efficient training methods. It allows users to flexibly customize the fine-tuning of 100+ LLMs, without the need for coding, through the built-in web UI LlamaBoard. We empirically validate the efficiency and effectiveness of our framework on language modeling and text generation tasks. It has been released at https://github.com/hiyouga/LLaMA-Factory and has already received over 13,000 stars and 1,600 forks.},
urldate = {2024-03-24},
publisher = {arXiv},
author = {Zheng, Yaowei and Zhang, Richong and Zhang, Junhao and Ye, Yanhan and Luo, Zheyan and Ma, Yongqiang},
month = mar,
year = {2024},
note = {arXiv:2403.13372 [cs]},
}
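
Note: among the efficient training methods the paper integrates is LoRA (low-rank adaptation). For readers who want a concrete sense of what the framework automates behind its YAML configs and the LlamaBoard UI, below is a minimal sketch of LoRA adapter injection using Hugging Face Transformers and PEFT, the libraries LlamaFactory builds on. This is not LlamaFactory's own API; the model name and hyperparameters here are illustrative assumptions, not values from the paper.

    # Minimal LoRA fine-tuning setup with Transformers + PEFT.
    # Illustrative only: LlamaFactory drives this kind of setup
    # through config files / its web UI rather than user code.
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from peft import LoraConfig, get_peft_model

    # Placeholder model id; any causal LM with q_proj/v_proj modules works.
    model_id = "meta-llama/Llama-2-7b-hf"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)

    # LoRA freezes the base weights and trains small low-rank
    # adapter matrices added to selected projection layers.
    lora_config = LoraConfig(
        r=8,                                  # rank of the adapters
        lora_alpha=16,                        # adapter scaling factor
        lora_dropout=0.05,
        target_modules=["q_proj", "v_proj"],  # attention projections
        task_type="CAUSAL_LM",
    )
    model = get_peft_model(model, lora_config)
    model.print_trainable_parameters()  # typically well under 1% of weights

Because only the adapter matrices receive gradients, optimizer state and gradient memory shrink dramatically, which is what makes fine-tuning large models feasible on modest hardware; the framework's contribution is exposing this (and related methods) uniformly across 100+ model architectures.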
{"_id":"8gZ2Xk73Ba7YctfeH","bibbaseid":"zheng-zhang-zhang-ye-luo-ma-llamafactoryunifiedefficientfinetuningof100languagemodels-2024","author_short":["Zheng, Y.","Zhang, R.","Zhang, J.","Ye, Y.","Luo, Z.","Ma, Y."],"bibdata":{"bibtype":"misc","type":"misc","title":"LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models","shorttitle":"LlamaFactory","url":"http://arxiv.org/abs/2403.13372","doi":"10.48550/arXiv.2403.13372","abstract":"Efficient fine-tuning is vital for adapting large language models (LLMs) to downstream tasks. However, it requires non-trivial efforts to implement these methods on different models. We present LlamaFactory, a unified framework that integrates a suite of cutting-edge efficient training methods. It allows users to flexibly customize the fine-tuning of 100+ LLMs without the need for coding through the built-in web UI LlamaBoard. We empirically validate the efficiency and effectiveness of our framework on language modeling and text generation tasks. It has been released at https://github.com/hiyouga/LLaMA-Factory and already received over 13,000 stars and 1,600 forks.","urldate":"2024-03-24","publisher":"arXiv","author":[{"propositions":[],"lastnames":["Zheng"],"firstnames":["Yaowei"],"suffixes":[]},{"propositions":[],"lastnames":["Zhang"],"firstnames":["Richong"],"suffixes":[]},{"propositions":[],"lastnames":["Zhang"],"firstnames":["Junhao"],"suffixes":[]},{"propositions":[],"lastnames":["Ye"],"firstnames":["Yanhan"],"suffixes":[]},{"propositions":[],"lastnames":["Luo"],"firstnames":["Zheyan"],"suffixes":[]},{"propositions":[],"lastnames":["Ma"],"firstnames":["Yongqiang"],"suffixes":[]}],"month":"March","year":"2024","note":"arXiv:2403.13372 [cs]","bibtex":"@misc{zheng_llamafactory_2024,\n\ttitle = {{LlamaFactory}: {Unified} {Efficient} {Fine}-{Tuning} of 100+ {Language} {Models}},\n\tshorttitle = {{LlamaFactory}},\n\turl = {http://arxiv.org/abs/2403.13372},\n\tdoi = {10.48550/arXiv.2403.13372},\n\tabstract = {Efficient fine-tuning is vital for adapting large language models (LLMs) to downstream tasks. However, it requires non-trivial efforts to implement these methods on different models. We present LlamaFactory, a unified framework that integrates a suite of cutting-edge efficient training methods. It allows users to flexibly customize the fine-tuning of 100+ LLMs without the need for coding through the built-in web UI LlamaBoard. We empirically validate the efficiency and effectiveness of our framework on language modeling and text generation tasks. 
It has been released at https://github.com/hiyouga/LLaMA-Factory and already received over 13,000 stars and 1,600 forks.},\n\turldate = {2024-03-24},\n\tpublisher = {arXiv},\n\tauthor = {Zheng, Yaowei and Zhang, Richong and Zhang, Junhao and Ye, Yanhan and Luo, Zheyan and Ma, Yongqiang},\n\tmonth = mar,\n\tyear = {2024},\n\tnote = {arXiv:2403.13372 [cs]},\n}\n\n","author_short":["Zheng, Y.","Zhang, R.","Zhang, J.","Ye, Y.","Luo, Z.","Ma, Y."],"key":"zheng_llamafactory_2024","id":"zheng_llamafactory_2024","bibbaseid":"zheng-zhang-zhang-ye-luo-ma-llamafactoryunifiedefficientfinetuningof100languagemodels-2024","role":"author","urls":{"Paper":"http://arxiv.org/abs/2403.13372"},"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"misc","biburl":"https://bibbase.org/zotero/andreasmartin","dataSources":["jurZeGzSpYdkQ8rm4"],"keywords":[],"search_terms":["llamafactory","unified","efficient","fine","tuning","100","language","models","zheng","zhang","zhang","ye","luo","ma"],"title":"LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models","year":2024}