Yousefsalem committed on
Commit
fc12b3a
·
verified ·
1 Parent(s): 322a467

Update src/models.py

Browse files
Files changed (1) hide show
  1. src/models.py +4 -4
src/models.py CHANGED
@@ -11,8 +11,8 @@ def download_models():
11
  1. BioMistral-7B - A medical-specific language model.
12
  2. Llama-2-7B - A general-purpose language model.
13
  """
14
- os.system('wget -O BioMistral-7B.Q4_K_M.gguf "https://huggingface.co/MaziyarPanahi/BioMistral-7B-GGUF/resolve/main/BioMistral-7B.Q4_K_M.gguf?download=true"')
15
- os.system('wget -O llama-2-7b.Q4_K_M.gguf "https://huggingface.co/TheBloke/Llama-2-7B-GGUF/resolve/main/llama-2-7b.Q4_K_M.gguf?download=true"')
16
 
17
  # Call the download function to ensure the models are available before starting the bot
18
  download_models()
@@ -25,14 +25,14 @@ classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnl
25
 
26
  # Load LLM models using CTransformers
27
  general_llm = CTransformers(
28
- model="/kaggle/working/llama-2-7b.Q8_0.gguf",
29
  model_type="llama",
30
  config={'max_new_tokens': 512, 'temperature': 0.7},
31
  stream=True # Enable streaming here
32
  )
33
 
34
  medical_llm = CTransformers(
35
- model="/kaggle/working/BioMistral-7B.Q8_0.gguf",
36
  model_type="llama",
37
  config={'max_new_tokens': 512, 'temperature': 0.7},
38
  stream=True # Enable streaming here
 
11
  1. BioMistral-7B - A medical-specific language model.
12
  2. Llama-2-7B - A general-purpose language model.
13
  """
14
+ os.system('wget -O llama-2-7b.Q8_0.gguf "https://huggingface.co/TheBloke/Llama-2-7B-GGUF/resolve/main/llama-2-7b.Q8_0.gguf?download=true"')
15
+ os.system('wget -O BioMistral-7B.Q8_0.gguf "https://huggingface.co/MaziyarPanahi/BioMistral-7B-GGUF/resolve/main/BioMistral-7B.Q8_0.gguf?download=true"')
16
 
17
  # Call the download function to ensure the models are available before starting the bot
18
  download_models()
 
25
 
26
  # Load LLM models using CTransformers
27
  general_llm = CTransformers(
28
+ model="./llama-2-7b.Q8_0.gguf",
29
  model_type="llama",
30
  config={'max_new_tokens': 512, 'temperature': 0.7},
31
  stream=True # Enable streaming here
32
  )
33
 
34
  medical_llm = CTransformers(
35
+ model="./BioMistral-7B.Q8_0.gguf",
36
  model_type="llama",
37
  config={'max_new_tokens': 512, 'temperature': 0.7},
38
  stream=True # Enable streaming here