fix deepseek-reasoner api requests
#11 opened by nappenstance
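Adds an explicit `deepseek-reasoner` branch to the hallucination-evaluation call in `app.py`: for this model the request sends only `model` and `messages`, and the JSON result is parsed directly from `response.choices[0].message.content` with `json.loads`.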
app.py
CHANGED
@@ -256,6 +256,7 @@ class PAS2:
                     messages=messages
                 )
                 result = response.choices[0].message.content
+
             else: # openai-compatible API
                 response = client.chat.completions.create(
                     model=model_id,
@@ -489,6 +490,18 @@ Evaluate these responses for hallucinations:\n\n{context}\n\n
                 content = response.choices[0].message.content
                 # Normal JSON parsing for mistral
                 result_json = json.loads(content)
+            elif model_id == "deepseek-reasoner":
+                response = client.chat.completions.create(
+                    model=model_id,
+                    messages=[
+                        {"role": "system", "content": customized_system_prompt},
+                        {"role": "user", "content": user_content}
+                    ],
+                )
+                content = response.choices[0].message.content
+
+                result_json = json.loads(content)
+
             else: # openai-compatible API
                 response = client.chat.completions.create(
                     model=model_id,
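For context, here is a minimal, self-contained sketch of how the new branch behaves against DeepSeek's OpenAI-compatible API. The client setup, base URL, environment variable name, and example prompts are assumptions for illustration and are not taken from app.py; only the request shape (model plus messages, no `response_format`) and the `json.loads` parsing mirror the diff.

```python
import json
import os

from openai import OpenAI

# Assumptions for illustration: the env var name and base_url are not from app.py.
client = OpenAI(
    api_key=os.environ["DEEPSEEK_API_KEY"],   # assumed environment variable
    base_url="https://api.deepseek.com",      # DeepSeek's OpenAI-compatible endpoint
)

model_id = "deepseek-reasoner"
# Placeholder prompts; app.py builds these from its own templates.
customized_system_prompt = "You are a strict judge. Reply with a single JSON object."
user_content = "Evaluate these responses for hallucinations: ..."

if model_id == "deepseek-reasoner":
    # Mirrors the new branch: only model and messages are sent,
    # with no response_format or sampling parameters.
    response = client.chat.completions.create(
        model=model_id,
        messages=[
            {"role": "system", "content": customized_system_prompt},
            {"role": "user", "content": user_content},
        ],
    )
    content = response.choices[0].message.content
    # The JSON comes back as plain message text, so the parse can fail
    # if the model adds prose around it.
    result_json = json.loads(content)
    print(result_json)
```

Because the branch never requests structured output, `json.loads` will raise if the model wraps the JSON in extra prose; guarding the parse with a `try/except json.JSONDecodeError` would make the new path more forgiving.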