Update README.md
Browse files
README.md
CHANGED
@@ -95,23 +95,27 @@ chat_input = tokenizer.apply_chat_template(
|
|
95 |
inputs = tokenizer(chat_input, return_tensors="pt").to(model.device)
|
96 |
|
97 |
# 텍스트 생성
|
98 |
-
outputs = model.generate(
|
99 |
**inputs,
|
100 |
max_new_tokens=256,
|
101 |
-
temperature=0.7,
|
102 |
-
top_p=0.8,
|
103 |
top_k=20,
|
104 |
-
|
|
|
|
|
|
|
105 |
)
|
106 |
|
107 |
# 결과 디코딩 및 출력
|
108 |
-
|
|
|
109 |
|
110 |
-
# 생성된 답변만 추출
|
111 |
-
answer = response.split("assistant\n")[-1].strip()
|
112 |
print(f"질문: {question}")
|
113 |
print(f"답변: {answer}")
|
114 |
|
|
|
115 |
# 예상 출력:
|
116 |
# 질문: 저는 사진 찍는 걸 좋아해요.
|
117 |
# 답변: 사진작가님 어서오시고 ㅋㅋㅋ 삼각대 꼭 쓰세요!
|
|
|
95 |
inputs = tokenizer(chat_input, return_tensors="pt").to(model.device)
|
96 |
|
97 |
# 텍스트 생성
|
98 |
+
outputs = model.generate(
|
99 |
**inputs,
|
100 |
max_new_tokens=256,
|
101 |
+
temperature=0.7,
|
102 |
+
top_p=0.8,
|
103 |
top_k=20,
|
104 |
+
min_p=0,
|
105 |
+
repetition_penalty=1.15,
|
106 |
+
do_sample=True,
|
107 |
+
pad_token_id=tokenizer.eos_token_id
|
108 |
)
|
109 |
|
110 |
# 결과 디코딩 및 출력
|
111 |
+
response_ids = outputs[0][len(inputs.input_ids[0]):]
|
112 |
+
answer = tokenizer.decode(response_ids, skip_special_tokens=True)
|
113 |
|
114 |
+
# 생성된 답변만 추출
|
|
|
115 |
print(f"질문: {question}")
|
116 |
print(f"답변: {answer}")
|
117 |
|
118 |
+
|
119 |
# 예상 출력:
|
120 |
# 질문: 저는 사진 찍는 걸 좋아해요.
|
121 |
# 답변: 사진작가님 어서오시고 ㅋㅋㅋ 삼각대 꼭 쓰세요!
|