Fix: Correct the usage code (remove erroneous LoRA references)
README.md CHANGED

```diff
@@ -64,7 +64,7 @@ model = AutoModelForCausalLM.from_pretrained(
 
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 
-def generate_medical_response(question, max_length=500, temperature=0.7):
+def generate_medical_response(question, max_length=500, temperature=0.2):
     messages = [
         {
             "role": "system",
@@ -93,9 +93,7 @@ def generate_medical_response(question, max_length=500, temperature=0.7):
         do_sample=True,
         temperature=temperature,
         pad_token_id=tokenizer.eos_token_id,
-
-        repetition_penalty=1.1,
-        no_repeat_ngram_size=3
+
     )
 
     response = tokenizer.decode(outputs[0][input_len:], skip_special_tokens=True)
```
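Net effect of the commit: the default sampling temperature drops from 0.7 to 0.2, and the `repetition_penalty=1.1` and `no_repeat_ngram_size=3` arguments are removed from the `generate()` call. For context, below is a minimal sketch of how the usage function could read after this change. Everything the diff does not show is an assumption: the imports, the `model_id` placeholder, the system prompt, and the chat-template step that would produce the `outputs` and `input_len` variables referenced in the decode line are reconstructed from the standard `transformers` chat API, not taken from the README.

```python
# Sketch of the post-commit usage code. Only the function signature, the
# generation arguments, and the decode line appear in the diff; the rest
# is an assumed reconstruction.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "..."  # hypothetical placeholder; the actual repo id is not in the diff

model = AutoModelForCausalLM.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)

def generate_medical_response(question, max_length=500, temperature=0.2):
    messages = [
        {"role": "system", "content": "..."},  # system prompt elided in the diff
        {"role": "user", "content": question},
    ]
    # Assumed: a chat-template + generate flow that yields the `outputs`
    # and `input_len` used by the decode call shown in the diff.
    inputs = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    )
    input_len = inputs.shape[-1]
    outputs = model.generate(
        inputs,
        max_new_tokens=max_length,  # assumption: the exact length kwarg is not shown
        do_sample=True,
        temperature=temperature,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Slice off the prompt tokens so only the newly generated answer is decoded.
    response = tokenizer.decode(outputs[0][input_len:], skip_special_tokens=True)
    return response
```

The lower default temperature makes sampled answers more conservative and repeatable, a common choice for a medical Q&A assistant where deterministic, on-topic output matters more than variety.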