zhilinw committed (verified)
Commit 2923390 · Parent(s): 3b1ed34

Update README.md

Files changed (1)
  1. README.md +3 -3
README.md CHANGED
@@ -34,11 +34,11 @@ As of 18 Mar 2025, augmenting models with the Feedback-Edit Inference Time Scaling
 
 | Model | Arena Hard (95% CI) |
 |:-----------------------------|:----------------|
-| Llama-3.3-Nemotron-49B-Instruct + **Feedback-Edit ITS** | **93.4 (-1.1, 1.0)** |
+| Llama-3.3-Nemotron-Super-49B-v1 + **Feedback-Edit ITS** | **93.4 (-1.1, 1.0)** |
 | Llama-3.1-Nemotron-70B-Instruct + **Feedback-Edit ITS** | 92.7 (-1.2, 0.9) |
 | o1-mini-2024-09-12 | 92.0 (-1.2, 1.0) |
 | o1-preview-2024-09-12 | 90.4 (-1.1, 1.3) |
-| Llama-3.3-Nemotron-49B-Instruct | 88.3 (-1.6, 1.6) |
+| Llama-3.3-Nemotron-Super-49B-v1 | 88.3 (-1.6, 1.6) |
 | claude-3-5-sonnet-20241022 | 85.2 (-1.4, 1.6) |
 | Llama-3.1-Nemotron-70B-Instruct | 84.9 (-1.7, 1.8) |
 
@@ -105,7 +105,7 @@ This code has been tested on Transformers v4.45.0, torch v2.3.0a0+40ec155e58.nv2
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_name = "nvidia/Llama-3.3-Nemotron-70B-Feedback-HF"
+model_name = "nvidia/Llama-3.3-Nemotron-70B-Feedback"
 model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
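
The snippet changed in this hunk stops after loading the model and tokenizer. A minimal sketch of how feedback generation might continue is shown below, assuming the model's chat template is used to format the conversation to critique; the example messages and generation settings are illustrative assumptions, not the README's documented usage.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "nvidia/Llama-3.3-Nemotron-70B-Feedback"
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Illustrative conversation to request feedback on; the exact prompt format
# expected by the model is defined in the README, not here.
messages = [
    {"role": "user", "content": "What is 2 + 2?"},
    {"role": "assistant", "content": "2 + 2 equals 4."},
]

# Render the conversation with the model's chat template and tokenize it.
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# Generate the feedback; greedy decoding and the token budget are assumptions,
# not documented defaults.
output_ids = model.generate(input_ids, max_new_tokens=512, do_sample=False)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```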