---
license: cc-by-nc-4.0
language:
- ro
base_model:
- OpenLLM-Ro/RoLlama2-7b-Base
datasets:
- OpenLLM-Ro/ro_sft_alpaca
- OpenLLM-Ro/ro_sft_alpaca_gpt4
- OpenLLM-Ro/ro_sft_dolly
- OpenLLM-Ro/ro_sft_selfinstruct_gpt4
- OpenLLM-Ro/ro_sft_norobots
- OpenLLM-Ro/ro_sft_orca
- OpenLLM-Ro/ro_sft_camel
- OpenLLM-Ro/ro_sft_oasst
- OpenLLM-Ro/ro_sft_ultrachat
- OpenLLM-Ro/ro_sft_magpie_mt
- OpenLLM-Ro/ro_sft_magpie_reasoning
model-index:
- name: OpenLLM-Ro/RoLlama2-7b-Instruct-2025-04-23
  results:
  - task:
      type: text-generation
    dataset:
      name: RoMT-Bench
      type: RoMT-Bench
    metrics:
    - name: Score
      type: Score
      value: 4.97
  - task:
      type: text-generation
    dataset:
      name: RoCulturaBench
      type: RoCulturaBench
    metrics:
    - name: Score
      type: Score
      value: 4.56
  - task:
      type: text-generation
    dataset:
      name: Romanian_Academic_Benchmarks
      type: Romanian_Academic_Benchmarks
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 45.51
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_arc_challenge
      type: OpenLLM-Ro/ro_arc_challenge
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 45.7
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_mmlu
      type: OpenLLM-Ro/ro_mmlu
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 40.36
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_winogrande
      type: OpenLLM-Ro/ro_winogrande
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 63.26
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_hellaswag
      type: OpenLLM-Ro/ro_hellaswag
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 60.25
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_gsm8k
      type: OpenLLM-Ro/ro_gsm8k
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 18.02
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_truthfulqa
      type: OpenLLM-Ro/ro_truthfulqa
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 45.48
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_binary
      type: LaRoSeDa_binary
    metrics:
    - name: Average macro-f1
      type: macro-f1
      value: 97.6
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_multiclass
      type: LaRoSeDa_multiclass
    metrics:
    - name: Average macro-f1
      type: macro-f1
      value: 60.22
  - task:
      type: text-generation
    dataset:
      name: WMT_EN-RO
      type: WMT_EN-RO
    metrics:
    - name: Average bleu
      type: bleu
      value: 27.21
  - task:
      type: text-generation
    dataset:
      name: WMT_RO-EN
      type: WMT_RO-EN
    metrics:
    - name: Average bleu
      type: bleu
      value: 22.15
  - task:
      type: text-generation
    dataset:
      name: XQuAD
      type: XQuAD
    metrics:
    - name: Average exact_match
      type: exact_match
      value: 47.39
  - task:
      type: text-generation
    dataset:
      name: XQuAD
      type: XQuAD
    metrics:
    - name: Average f1
      type: f1
      value: 65.77
  - task:
      type: text-generation
    dataset:
      name: STS
      type: STS
    metrics:
    - name: Average spearman
      type: spearman
      value: 59.05
  - task:
      type: text-generation
    dataset:
      name: STS
      type: STS
    metrics:
    - name: Average pearson
      type: pearson
      value: 56.45
  - task:
      type: text-generation
    dataset:
      name: RoMT-Bench
      type: RoMT-Bench
    metrics:
    - name: First turn
      type: Score
      value: 5.56
    - name: Second turn
      type: Score
      value: 4.39
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_arc_challenge
      type: OpenLLM-Ro/ro_arc_challenge
    metrics:
    - name: 0-shot
      type: accuracy
      value: 43.02
    - name: 1-shot
      type: accuracy
      value: 45.84
    - name: 3-shot
      type: accuracy
      value: 45.24
    - name: 5-shot
      type: accuracy
      value: 46.19
    - name: 10-shot
      type: accuracy
      value: 46.7
    - name: 25-shot
      type: accuracy
      value: 47.22
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_mmlu
      type: OpenLLM-Ro/ro_mmlu
    metrics:
    - name: 0-shot
      type: accuracy
      value: 38.64
    - name: 1-shot
      type: accuracy
      value: 40.77
    - name: 3-shot
      type: accuracy
      value: 41.19
    - name: 5-shot
      type: accuracy
      value: 40.86
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_winogrande
      type: OpenLLM-Ro/ro_winogrande
    metrics:
    - name: 0-shot
      type: accuracy
      value: 63.61
    - name: 1-shot
      type: accuracy
      value: 62.75
    - name: 3-shot
      type: accuracy
      value: 63.46
    - name: 5-shot
      type: accuracy
      value: 63.22
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_hellaswag
      type: OpenLLM-Ro/ro_hellaswag
    metrics:
    - name: 0-shot
      type: accuracy
      value: 59.79
    - name: 1-shot
      type: accuracy
      value: 59.62
    - name: 3-shot
      type: accuracy
      value: 60.12
    - name: 5-shot
      type: accuracy
      value: 60.71
    - name: 10-shot
      type: accuracy
      value: 61.01
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_gsm8k
      type: OpenLLM-Ro/ro_gsm8k
    metrics:
    - name: 1-shot
      type: accuracy
      value: 6.14
    - name: 3-shot
      type: accuracy
      value: 22.52
    - name: 5-shot
      type: accuracy
      value: 25.4
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_binary
      type: LaRoSeDa_binary
    metrics:
    - name: 0-shot
      type: macro-f1
      value: 98.17
    - name: 1-shot
      type: macro-f1
      value: 96.3
    - name: 3-shot
      type: macro-f1
      value: 97.8
    - name: 5-shot
      type: macro-f1
      value: 98.13
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_multiclass
      type: LaRoSeDa_multiclass
    metrics:
    - name: 0-shot
      type: macro-f1
      value: 49.8
    - name: 1-shot
      type: macro-f1
      value: 56.03
    - name: 3-shot
      type: macro-f1
      value: 65.33
    - name: 5-shot
      type: macro-f1
      value: 69.7
  - task:
      type: text-generation
    dataset:
      name: WMT_EN-RO
      type: WMT_EN-RO
    metrics:
    - name: 0-shot
      type: bleu
      value: 19.34
    - name: 1-shot
      type: bleu
      value: 29.89
    - name: 3-shot
      type: bleu
      value: 29.99
    - name: 5-shot
      type: bleu
      value: 29.62
  - task:
      type: text-generation
    dataset:
      name: WMT_RO-EN
      type: WMT_RO-EN
    metrics:
    - name: 0-shot
      type: bleu
      value: 2.29
    - name: 1-shot
      type: bleu
      value: 14.74
    - name: 3-shot
      type: bleu
      value: 34.82
    - name: 5-shot
      type: bleu
      value: 36.75
  - task:
      type: text-generation
    dataset:
      name: XQuAD_EM
      type: XQuAD_EM
    metrics:
    - name: 0-shot
      type: exact_match
      value: 42.86
    - name: 1-shot
      type: exact_match
      value: 47.82
    - name: 3-shot
      type: exact_match
      value: 48.32
    - name: 5-shot
      type: exact_match
      value: 50.59
  - task:
      type: text-generation
    dataset:
      name: XQuAD_F1
      type: XQuAD_F1
    metrics:
    - name: 0-shot
      type: f1
      value: 63.66
    - name: 1-shot
      type: f1
      value: 65.27
    - name: 3-shot
      type: f1
      value: 66.04
    - name: 5-shot
      type: f1
      value: 68.12
  - task:
      type: text-generation
    dataset:
      name: STS_Spearman
      type: STS_Spearman
    metrics:
    - name: 1-shot
      type: spearman
      value: 54.51
    - name: 3-shot
      type: spearman
      value: 60.98
    - name: 5-shot
      type: spearman
      value: 61.65
  - task:
      type: text-generation
    dataset:
      name: STS_Pearson
      type: STS_Pearson
    metrics:
    - name: 1-shot
      type: pearson
      value: 54.35
    - name: 3-shot
      type: pearson
      value: 57.88
    - name: 5-shot
      type: pearson
      value: 57.13
---

# Model Card for RoLlama2-7b-Instruct

<!-- Provide a quick summary of what the model is/does. -->

RoLlama2 is a family of pretrained and fine-tuned generative text models for Romanian. This is the repository for the **instruct 7B model**. Links to other models can be found at the bottom of this page.

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->
OpenLLM-Ro represents the first open-source effort to build an LLM specialized for Romanian. OpenLLM-Ro has developed and publicly released a collection of Romanian LLMs, comprising both foundational models and instruct and chat variants.

- **Developed by:** OpenLLM-Ro
<!-- - **Funded by [optional]:** [More Information Needed] -->
<!-- - **Shared by [optional]:** [More Information Needed] -->
<!-- - **Model type:** [More Information Needed] -->
- **Language(s):** Romanian
- **License:** cc-by-nc-4.0
- **Finetuned from model:** [RoLlama2-7b-Base](https://huggingface.co/OpenLLM-Ro/RoLlama2-7b-Base)
- **Trained using:** [RoAlpaca](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_alpaca), [RoAlpacaGPT4](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_alpaca_gpt4), [RoDolly](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_dolly), [RoSelfInstruct](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_selfinstruct_gpt4), [RoNoRobots](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_norobots), [RoOrca](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_orca), [RoCamel](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_camel), [RoOpenAssistant](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_oasst), [RoUltraChat](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_ultrachat), [RoMagpiePro](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_magpie_mt), [RoMagpieReasoning](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_magpie_reasoning)

### Model Sources

<!-- Provide the basic links for the model. -->

- **Repository:** https://github.com/OpenLLM-Ro/LLaMA-Factory
- **Paper:** https://arxiv.org/abs/2406.18266

## Intended Use

### Intended Use Cases

RoLlama2 is intended for research use in Romanian. Base models can be adapted for a variety of natural language tasks, while instruction- and chat-tuned models are intended for assistant-like chat.

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

Use in any manner that violates the license or any applicable laws or regulations, and use in languages other than Romanian.
## How to Get Started with the Model

Use the code below to get started with the model.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("OpenLLM-Ro/RoLlama2-7b-Instruct-2025-04-23")
model = AutoModelForCausalLM.from_pretrained("OpenLLM-Ro/RoLlama2-7b-Instruct-2025-04-23")

instruction = "Care este cel mai înalt vârf muntos din România?"
chat = [
    {"role": "system", "content": "Ești un asistent folositor, respectuos și onest. Încearcă să ajuți cât mai mult prin informațiile oferite, excluzând răspunsuri toxice, rasiste, sexiste, periculoase și ilegale."},
    {"role": "user", "content": instruction},
]
# Render the conversation with the model's chat template.
prompt = tokenizer.apply_chat_template(chat, tokenize=False)

# The template already includes the special tokens, so none are added here.
inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
outputs = model.generate(input_ids=inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0]))
```
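The same conversation can also be run through the high-level `pipeline` API, which applies the model's chat template automatically when given a list of messages. The snippet below is a minimal sketch rather than an official example from this repository; it assumes a recent `transformers` version with chat-aware text-generation pipelines, and `max_new_tokens=128` is an illustrative value.

```python
from transformers import pipeline

# Minimal sketch: the text-generation pipeline accepts chat-format input
# (a list of {"role", "content"} dicts) and applies the chat template itself.
generator = pipeline("text-generation", model="OpenLLM-Ro/RoLlama2-7b-Instruct-2025-04-23")

chat = [
    {"role": "system", "content": "Ești un asistent folositor, respectuos și onest. Încearcă să ajuți cât mai mult prin informațiile oferite, excluzând răspunsuri toxice, rasiste, sexiste, periculoase și ilegale."},
    {"role": "user", "content": "Care este cel mai înalt vârf muntos din România?"},
]

outputs = generator(chat, max_new_tokens=128)  # illustrative generation length
print(outputs[0]["generated_text"])
```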
					
						
## Academic Benchmarks

<table>
<tbody>
<tr>
<td><strong>Model</strong></td>
<td><strong><center>Average</center></strong></td>
<td><strong><center>ARC</center></strong></td>
<td><strong><center>MMLU</center></strong></td>
<td><strong><center>Winogrande</center></strong></td>
<td><strong><center>Hellaswag</center></strong></td>
<td><strong><center>GSM8k</center></strong></td>
<td><strong><center>TruthfulQA</center></strong></td>
</tr>
<tr>
<td>Llama-2-7b-chat</td><td><center>36.84</center></td><td><center>37.03</center></td><td><center>33.80</center></td><td><center>55.87</center></td><td><center>45.36</center></td><td><center>4.90</center></td><td><center>44.09</center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-05-14</td><td><center>45.71</center></td><td><center>43.66</center></td><td><center>39.70</center></td><td><center><strong>70.34</strong></center></td><td><center>57.36</center></td><td><center><strong>18.78</strong></center></td><td><center>44.44</center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-10-09</td><td><center>44.50</center></td><td><center>44.73</center></td><td><center>40.39</center></td><td><center>63.67</center></td><td><center>59.12</center></td><td><center>13.29</center></td><td><center>45.78</center></td>
</tr>
<tr>
<td><em>RoLlama2-7b-Instruct-2025-04-23</em></td><td><center><em>45.51</em></center></td><td><center><em>45.70</em></center></td><td><center><em>40.36</em></center></td><td><center><em>63.26</em></center></td><td><center><em>60.25</em></center></td><td><center><em>18.02</em></center></td><td><center><em>45.48</em></center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-DPO-2024-10-09</td><td><center>43.20</center></td><td><center>44.24</center></td><td><center>38.39</center></td><td><center>62.57</center></td><td><center>59.20</center></td><td><center>15.72</center></td><td><center>39.07</center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-DPO-2025-04-23</td><td><center><strong>46.77</strong></center></td><td><center><strong>48.16</strong></center></td><td><center><strong>41.38</strong></center></td><td><center>64.15</center></td><td><center><strong>61.37</strong></center></td><td><center>18.35</center></td><td><center><strong>47.20</strong></center></td>
</tr>
</tbody>
</table>
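For context, few-shot accuracies like those above are typically computed with an evaluation harness. The sketch below is illustrative only and is not this project's evaluation code: it assumes EleutherAI's `lm-evaluation-harness` (`pip install lm-eval`) and a hypothetical registered task name `ro_arc_challenge` mirroring the dataset id.

```python
import lm_eval

# Illustrative sketch only; "ro_arc_challenge" is a hypothetical task name
# that would need a task config registered with the harness.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=OpenLLM-Ro/RoLlama2-7b-Instruct-2025-04-23",
    tasks=["ro_arc_challenge"],
    num_fewshot=5,
)
print(results["results"])
```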
					
						
## Downstream Tasks

<table>
<tbody>
<tr>
<td></td>
<td colspan="4"><center><strong>LaRoSeDa</strong></center></td>
<td colspan="4"><center><strong>WMT</strong></center></td>
</tr>
<tr>
<td></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
</tr>
<tr>
<td><strong>Model</strong></td>
<td><center><strong>Binary<br>(Macro F1)</strong></center></td>
<td><center><strong>Multiclass<br>(Macro F1)</strong></center></td>
<td><center><strong>Binary<br>(Macro F1)</strong></center></td>
<td><center><strong>Multiclass<br>(Macro F1)</strong></center></td>
<td><center><strong>EN-RO<br>(Bleu)</strong></center></td>
<td><center><strong>RO-EN<br>(Bleu)</strong></center></td>
<td><center><strong>EN-RO<br>(Bleu)</strong></center></td>
<td><center><strong>RO-EN<br>(Bleu)</strong></center></td>
</tr>
<tr>
<td>Llama-2-7b-chat</td><td><center>87.78</center></td><td><center>52.81</center></td><td><center>97.27</center></td><td><center>82.02</center></td><td><center>15.55</center></td><td><center><strong>28.53</strong></center></td><td><center>19.99</center></td><td><center>31.48</center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-05-14</td><td><center>97.48</center></td><td><center><strong>65.26</strong></center></td><td><center><strong>98.83</strong></center></td><td><center><strong>87.28</strong></center></td><td><center><strong>27.38</strong></center></td><td><center>10.32</center></td><td><center>27.59</center></td><td><center><strong>40.13</strong></center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-10-09</td><td><center>97.66</center></td><td><center>62.41</center></td><td><center>97.97</center></td><td><center>60.89</center></td><td><center>27.13</center></td><td><center>19.39</center></td><td><center><strong>27.63</strong></center></td><td><center>39.75</center></td>
</tr>
<tr>
<td><em>RoLlama2-7b-Instruct-2025-04-23</em></td><td><center><em>97.60</em></center></td><td><center><em>60.22</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td><td><center><em>27.21</em></center></td><td><center><em>22.15</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-DPO-2024-10-09</td><td><center>97.31</center></td><td><center>60.56</center></td><td><center>-</center></td><td><center>-</center></td><td><center>26.56</center></td><td><center>21.68</center></td><td><center>-</center></td><td><center>-</center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-DPO-2025-04-23</td><td><center><strong>97.77</strong></center></td><td><center>65.21</center></td><td><center>-</center></td><td><center>-</center></td><td><center>25.48</center></td><td><center>22.75</center></td><td><center>-</center></td><td><center>-</center></td>
</tr>
</tbody>
</table>

<table>
<tbody>
<tr>
<td></td>
<td colspan="4"><center><strong>XQuAD</strong></center></td>
<td colspan="4"><center><strong>STS</strong></center></td>
</tr>
<tr>
<td></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
</tr>
<tr>
<td><strong>Model</strong></td>
<td><center><strong>(EM)</strong></center></td>
<td><center><strong>(F1)</strong></center></td>
<td><center><strong>(EM)</strong></center></td>
<td><center><strong>(F1)</strong></center></td>
<td><center><strong>(Spearman)</strong></center></td>
<td><center><strong>(Pearson)</strong></center></td>
<td><center><strong>(Spearman)</strong></center></td>
<td><center><strong>(Pearson)</strong></center></td>
</tr>
<tr>
<td>Llama-2-7b-chat</td><td><center>32.35</center></td><td><center>54.00</center></td><td><center><strong>60.34</strong></center></td><td><center><strong>75.98</strong></center></td><td><center>32.56</center></td><td><center>31.99</center></td><td><center>74.08</center></td><td><center>72.64</center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-05-14</td><td><center>44.52</center></td><td><center>64.75</center></td><td><center>54.96</center></td><td><center>70.20</center></td><td><center>65.50</center></td><td><center><strong>67.79</strong></center></td><td><center>84.44</center></td><td><center>84.76</center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-10-09</td><td><center>45.71</center></td><td><center>65.08</center></td><td><center>59.24</center></td><td><center>74.25</center></td><td><center>59.69</center></td><td><center>57.16</center></td><td><center><strong>84.66</strong></center></td><td><center><strong>85.07</strong></center></td>
</tr>
<tr>
<td><em>RoLlama2-7b-Instruct-2025-04-23</em></td><td><center><em><strong>47.39</strong></em></center></td><td><center><em><strong>65.77</strong></em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td><td><center><em>59.05</em></center></td><td><center><em>56.45</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-DPO-2024-10-09</td><td><center>35.78</center></td><td><center>59.31</center></td><td><center>-</center></td><td><center>-</center></td><td><center>61.22</center></td><td><center>58.41</center></td><td><center>-</center></td><td><center>-</center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-DPO-2025-04-23</td><td><center>38.28</center></td><td><center>60.88</center></td><td><center>-</center></td><td><center>-</center></td><td><center><strong>66.76</strong></center></td><td><center>64.72</center></td><td><center>-</center></td><td><center>-</center></td>
</tr>
</tbody>
</table>

## Romanian MT-Bench

<table>
<tbody>
<tr>
<td><strong>Model</strong></td>
<td><strong><center>Average</center></strong></td>
<td><strong><center>1st turn</center></strong></td>
<td><strong><center>2nd turn</center></strong></td>
<td><strong><center>Answers in Ro</center></strong></td>
</tr>
<tr>
<td>Llama-2-7b-chat</td><td><center>1.08</center></td><td><center>1.44</center></td><td><center>0.73</center></td><td><center>45/160</center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-05-14</td><td><center>3.86</center></td><td><center>4.67</center></td><td><center>3.04</center></td><td><center><strong>160/160</strong></center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-10-09</td><td><center>4.43</center></td><td><center>4.92</center></td><td><center>3.94</center></td><td><center><strong>160/160</strong></center></td>
</tr>
<tr>
<td><em>RoLlama2-7b-Instruct-2025-04-23</em></td><td><center><em>4.97</em></center></td><td><center><em>5.56</em></center></td><td><center><em>4.39</em></center></td><td><center><em><strong>160/160</strong></em></center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-DPO-2024-10-09</td><td><center>4.61</center></td><td><center>5.15</center></td><td><center>4.06</center></td><td><center><strong>160/160</strong></center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-DPO-2025-04-23</td><td><center><strong>5.55</strong></center></td><td><center><strong>5.84</strong></center></td><td><center><strong>5.26</strong></center></td><td><center><strong>160/160</strong></center></td>
</tr>
</tbody>
</table>
## RoCulturaBench

<table>
<tbody>
<tr>
<td><strong>Model</strong></td>
<td><strong><center>Average</center></strong></td>
<td><strong><center>Answers in Ro</center></strong></td>
</tr>
<tr>
<td>Llama-2-7b-chat</td><td><center>1.21</center></td><td><center>33/100</center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-05-14</td><td><center>3.77</center></td><td><center><strong>100/100</strong></center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-10-09</td><td><center>4.08</center></td><td><center><strong>100/100</strong></center></td>
</tr>
<tr>
<td><em>RoLlama2-7b-Instruct-2025-04-23</em></td><td><center><em>4.56</em></center></td><td><center><em><strong>100/100</strong></em></center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-DPO-2024-10-09</td><td><center>4.80</center></td><td><center><strong>100/100</strong></center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-DPO-2025-04-23</td><td><center><strong>5.24</strong></center></td><td><center><strong>100/100</strong></center></td>
</tr>
</tbody>
</table>
## RoLlama2 Model Family

| Model | Link |
|-------|:----:|
| RoLlama2-7b-Base-2024-05-14 | [link](https://huggingface.co/OpenLLM-Ro/RoLlama2-7b-Base-2024-05-14) |
| RoLlama2-7b-Instruct-2024-05-14 | [link](https://huggingface.co/OpenLLM-Ro/RoLlama2-7b-Instruct-2024-05-14) |
| RoLlama2-7b-Instruct-2024-10-09 | [link](https://huggingface.co/OpenLLM-Ro/RoLlama2-7b-Instruct-2024-10-09) |
| *RoLlama2-7b-Instruct-2025-04-23* | [link](https://huggingface.co/OpenLLM-Ro/RoLlama2-7b-Instruct-2025-04-23) |
| RoLlama2-7b-Instruct-DPO-2024-10-09 | [link](https://huggingface.co/OpenLLM-Ro/RoLlama2-7b-Instruct-DPO-2024-10-09) |
| RoLlama2-7b-Instruct-DPO-2025-04-23 | [link](https://huggingface.co/OpenLLM-Ro/RoLlama2-7b-Instruct-DPO-2025-04-23) |
## Citation

```
@misc{masala2024vorbecstiromanecsterecipetrain,
  title={"Vorbe\c{s}ti Rom\^ane\c{s}te?" A Recipe to Train Powerful Romanian LLMs with English Instructions},
  author={Mihai Masala and Denis C. Ilie-Ablachim and Alexandru Dima and Dragos Corlatescu and Miruna Zavelca and Ovio Olaru and Simina Terian-Dan and Andrei Terian-Dan and Marius Leordeanu and Horia Velicu and Marius Popescu and Mihai Dascalu and Traian Rebedea},
  year={2024},
  eprint={2406.18266},
  archivePrefix={arXiv},
  primaryClass={cs.CL},
  url={https://arxiv.org/abs/2406.18266},
}
```