Sadou committed on
Commit
907ade7
·
verified ·
1 Parent(s): 4c3e2e7

Fix: Corriger le code d'utilisation (suppression des références LoRA erronées)

Browse files
Files changed (1) hide show
  1. README.md +2 -4
README.md CHANGED
@@ -64,7 +64,7 @@ model = AutoModelForCausalLM.from_pretrained(
64
 
65
  tokenizer = AutoTokenizer.from_pretrained(model_id)
66
 
67
- def generate_medical_response(question, max_length=500, temperature=0.7):
68
  messages = [
69
  {
70
  "role": "system",
@@ -93,9 +93,7 @@ def generate_medical_response(question, max_length=500, temperature=0.7):
93
  do_sample=True,
94
  temperature=temperature,
95
  pad_token_id=tokenizer.eos_token_id,
96
- eos_token_id=tokenizer.eos_token_id,
97
- repetition_penalty=1.1,
98
- no_repeat_ngram_size=3
99
  )
100
 
101
  response = tokenizer.decode(outputs[0][input_len:], skip_special_tokens=True)
 
64
 
65
  tokenizer = AutoTokenizer.from_pretrained(model_id)
66
 
67
+ def generate_medical_response(question, max_length=500, temperature=0.2):
68
  messages = [
69
  {
70
  "role": "system",
 
93
  do_sample=True,
94
  temperature=temperature,
95
  pad_token_id=tokenizer.eos_token_id,
96
+
 
 
97
  )
98
 
99
  response = tokenizer.decode(outputs[0][input_len:], skip_special_tokens=True)