Update app.py
app.py CHANGED
@@ -45,28 +45,4 @@ def query_faiss(query, index, embedder, chunks, top_k=2):
     return "\n\n".join(retrieved_chunks)
 
 # Build prompt and generate answer
-def chat_with_transcript(query):
-    context = query_faiss(query, index, embedder, chunks)
-    prompt = f"""You are an AI assistant. Use the following context to answer the question.
-Context:
-{context}
-Question: {query}
-Answer:"""
-    response = llm(prompt)[0]['generated_text']
-    if "Answer:" not in response:
-        return "Error: Unable to parse the model's response."
-    return response.split("Answer:")[-1].strip()
-
-# Gradio interface
-with gr.Blocks() as demo:
-    gr.Markdown("# 📄 Chat with a Transcript")
-    query_input = gr.Textbox(label="Ask a question about the transcript")
-    answer_output = gr.Textbox(label="Answer")
-
-    query_input.submit(
-        chat_with_transcript,
-        inputs=[query_input],
-        outputs=[answer_output]
-    )
-
-demo.launch()
+def chat_with
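
For reference, the removed handler and Gradio wiring assemble into a runnable RAG app roughly as sketched below. This is a minimal reconstruction, not the Space's actual setup: the hunk never shows how app.py defines the chunks, embedder, index, and llm globals, or the body of query_faiss, so that scaffolding is assumed here (the sentence-transformers, FAISS, and gpt2 model choices are illustrative placeholders); only chat_with_transcript, the query_faiss signature and final return, and the gr.Blocks UI come from the diff.

# Minimal sketch. Assumed scaffolding (not shown in the hunk): the transcript
# chunks, the SentenceTransformer embedder, the FAISS index, and the llm
# pipeline. Model names are illustrative placeholders.
import faiss
import gradio as gr
import numpy as np
from sentence_transformers import SentenceTransformer
from transformers import pipeline

chunks = ["Example transcript chunk one.", "Example transcript chunk two."]
embedder = SentenceTransformer("all-MiniLM-L6-v2")  # assumed embedding model
embeddings = np.asarray(embedder.encode(chunks), dtype="float32")
index = faiss.IndexFlatL2(embeddings.shape[1])
index.add(embeddings)
llm = pipeline("text-generation", model="gpt2", max_new_tokens=100)  # assumed generator

def query_faiss(query, index, embedder, chunks, top_k=2):
    # Embed the query and return the top_k nearest chunks, joined for the prompt.
    # (Body assumed; only the signature and final return appear in the diff.)
    query_embedding = np.asarray(embedder.encode([query]), dtype="float32")
    _, indices = index.search(query_embedding, top_k)
    retrieved_chunks = [chunks[i] for i in indices[0]]
    return "\n\n".join(retrieved_chunks)

def chat_with_transcript(query):
    # Retrieve context, build the prompt, and keep only the text after "Answer:".
    context = query_faiss(query, index, embedder, chunks)
    prompt = f"""You are an AI assistant. Use the following context to answer the question.
Context:
{context}
Question: {query}
Answer:"""
    response = llm(prompt)[0]["generated_text"]
    if "Answer:" not in response:
        return "Error: Unable to parse the model's response."
    return response.split("Answer:")[-1].strip()

# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# 📄 Chat with a Transcript")
    query_input = gr.Textbox(label="Ask a question about the transcript")
    answer_output = gr.Textbox(label="Answer")
    query_input.submit(chat_with_transcript, inputs=[query_input], outputs=[answer_output])

demo.launch()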