marham-traversaal
committed on
Commit
•
52d0073
1
Parent(s):
d342040
Remove extra print statements
Browse files
services/openai_service.py
CHANGED
@@ -28,8 +28,6 @@ except Exception as e:
|
|
28 |
|
29 |
def generate_rag_response(json_output, user_query):
|
30 |
logging.info("Generating RAG response")
|
31 |
-
print("JSON INPUT FOR RAG RESPONE:")
|
32 |
-
print(json_output)
|
33 |
# Extract text from the JSON output
|
34 |
context_texts = [hit['chunk_text'] for hit in json_output]
|
35 |
|
@@ -50,7 +48,6 @@ def generate_rag_response(json_output, user_query):
|
|
50 |
max_tokens=2000, # Limit the maximum number of tokens in the response
|
51 |
temperature=0.5
|
52 |
)
|
53 |
-
print(f"GENERATED RESPONSE FROM OPENAI: {chat_completion}")
|
54 |
# Log the response from the model
|
55 |
logging.info("RAG response generation completed")
|
56 |
logging.info(f"RAG response: {chat_completion.choices[0].message.content}")
|
|
|
28 |
|
29 |
def generate_rag_response(json_output, user_query):
|
30 |
logging.info("Generating RAG response")
|
|
|
|
|
31 |
# Extract text from the JSON output
|
32 |
context_texts = [hit['chunk_text'] for hit in json_output]
|
33 |
|
|
|
48 |
max_tokens=2000, # Limit the maximum number of tokens in the response
|
49 |
temperature=0.5
|
50 |
)
|
|
|
51 |
# Log the response from the model
|
52 |
logging.info("RAG response generation completed")
|
53 |
logging.info(f"RAG response: {chat_completion.choices[0].message.content}")
|