Update README.md
Browse files
README.md
CHANGED
@@ -22,14 +22,23 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 22
 23   tokenizer_name = "Google/gemma-2-2b-it"
 24   model_name="lmassaron/gemma-2-2b-it-grpo-gsm8k"
 25 - tokenizer = AutoTokenizer.from_pretrained(tokenizer_name,
 26 -     (removed continuation line — text lost in page extraction)
 27
 28   FORMAT = """<reasoning>\n</reasoning>\n<answer>\n</answer>\n"""
 29
 30   question = "Which is bigger? 9.11 or 9.9?"
 31 - generator = pipeline("text-generation",
 32 -     (removed continuation line — text lost in page extraction)
 33   print(output["generated_text"])
 34   ```
 35
|
 22
 23   tokenizer_name = "Google/gemma-2-2b-it"
 24   model_name="lmassaron/gemma-2-2b-it-grpo-gsm8k"
 25 + tokenizer = AutoTokenizer.from_pretrained(tokenizer_name,
 26 +                                           trust_remote_code=True)
 27 + model = AutoModelForCausalLM.from_pretrained(model_name,
 28 +                                              device_map="auto",
 29 +                                              use_cache=True)
 30
 31   FORMAT = """<reasoning>\n</reasoning>\n<answer>\n</answer>\n"""
 32
 33   question = "Which is bigger? 9.11 or 9.9?"
 34 + generator = pipeline("text-generation",
 35 +                      model=model,
 36 +                      tokenizer=tokenizer,
 37 +                      do_sample=False,
 38 +                      batch_size=1)
 39 + output = generator([{"role": "user", "content": FORMAT + question}],
 40 +                    max_new_tokens=256,
 41 +                    return_full_text=False)[0]
 42   print(output["generated_text"])
 43   ```
 44