Update app.py
Browse files
app.py
CHANGED
@@ -137,6 +137,10 @@ with gr.Blocks(theme=Monochrome()) as demo:
|
|
137 |
<p>
|
138 |
<a href="https://github.com/pixeltable/pixeltable" target="_blank" style="color: #F25022; text-decoration: none; font-weight: bold;">Pixeltable</a> is a declarative interface for working with text, images, embeddings, and even video, enabling you to store, transform, index, and iterate on data.
|
139 |
</p>
|
|
|
|
|
|
|
|
|
140 |
"""
|
141 |
)
|
142 |
|
@@ -154,7 +158,7 @@ with gr.Blocks(theme=Monochrome()) as demo:
|
|
154 |
gr.Markdown("""
|
155 |
- When a user asks a question, the system searches for the most relevant chunks of text from the uploaded documents.
|
156 |
- It then uses these relevant chunks as context for a large language model (LLM) to generate an answer.
|
157 |
-
- The LLM
|
158 |
""")
|
159 |
|
160 |
with gr.Row():
|
|
|
137 |
<p>
|
138 |
<a href="https://github.com/pixeltable/pixeltable" target="_blank" style="color: #F25022; text-decoration: none; font-weight: bold;">Pixeltable</a> is a declarative interface for working with text, images, embeddings, and even video, enabling you to store, transform, index, and iterate on data.
|
139 |
</p>
|
140 |
+
|
141 |
+
<div style="background-color: #E5DDD4; border: 1px solid #e9ecef; color: #000000; border-radius: 8px; padding: 15px; margin-bottom: 20px;">
|
142 |
+
<strong style="color: #000000">Disclaimer:</strong> This app is best run on your own hardware with a GPU for optimal performance. This Hugging Face Space uses the free tier (2 vCPU, 16GB RAM), which may result in slower processing times, especially for large video files. If you wish to use this app with your own hardware for improved performance, you can <a href="https://huggingface.co/spaces/Pixeltable/Multi-LLM-RAG-with-Groundtruth-Comparison/duplicate" target="_blank" style="color: #4D148C; text-decoration: none; font-weight: bold;">duplicate this Hugging Face Space</a> and run it locally, or use Google Colab with its free, limited GPU support.
|
143 |
+
</div>
|
144 |
"""
|
145 |
)
|
146 |
|
|
|
158 |
gr.Markdown("""
|
159 |
- When a user asks a question, the system searches for the most relevant chunks of text from the uploaded documents.
|
160 |
- It then uses these relevant chunks as context for a large language model (LLM) to generate an answer.
|
161 |
+
- The LLM formulates a response based on the provided context and the user's question.
|
162 |
""")
|
163 |
|
164 |
with gr.Row():
|