Gregor Betz
committed on
comments
Browse files
- config.yaml +4 -4
config.yaml
CHANGED
@@ -1,12 +1,12 @@
|
|
1 |
client_llm:
|
2 |
-
url: "" # <--
|
3 |
-
model_id: "HuggingFaceH4/zephyr-7b-beta"
|
4 |
max_tokens: 800
|
5 |
temperature: 0.6
|
6 |
expert_llm:
|
7 |
-
url: "" # <--
|
8 |
model_id: "meta-llama/Meta-Llama-3.1-70B-Instruct"
|
9 |
classifier_llm:
|
10 |
model_id: "MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli"
|
11 |
-
url: "" # <--
|
12 |
batch_size: 8
|
|
|
1 |
client_llm:
|
2 |
+
url: "" # <-- start your own inference endpoint here and provide url (or use https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta)
|
3 |
+
model_id: "HuggingFaceH4/zephyr-7b-beta" # <-- your client llm
|
4 |
max_tokens: 800
|
5 |
temperature: 0.6
|
6 |
expert_llm:
|
7 |
+
url: "" # <-- start your own inference endpoint here and provide url (or use https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3.1-70B-Instruct)
|
8 |
model_id: "meta-llama/Meta-Llama-3.1-70B-Instruct"
|
9 |
classifier_llm:
|
10 |
model_id: "MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli"
|
11 |
+
url: "" # <-- start your own inference endpoint of classifier model here and provide url
|
12 |
batch_size: 8
|