Spaces:
Running
on
Zero
Running
on
Zero
kwabs22
committed on
Commit
·
286206e
1
Parent(s):
cd998d9
Front end as FAQ test
Browse files
app.py
CHANGED
@@ -302,6 +302,25 @@ def process_query(query, use_rag, stream=False):
|
|
302 |
|
303 |
#--------------------------------------------------------------------------------------------------------------------------------
|
304 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
305 |
# Default configuration template
|
306 |
default_config = {
|
307 |
'background': '/AutoGameBackgrounds/1stGameLoc123.png',
|
@@ -942,7 +961,19 @@ with gr.Blocks() as demo:
|
|
942 |
<div style="width: 20%; text-align: center">HF + Gradio allows for api use so this my prototype tool for tool use test</div>
|
943 |
</div>""")
|
944 |
with gr.Accordion("Qwen 0.5B as Space Guide Tests", open=False):
|
945 |
-
gr.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
946 |
# gr.Interface(
|
947 |
# fn=rag,
|
948 |
# inputs=[
|
@@ -958,46 +989,45 @@ with gr.Blocks() as demo:
|
|
958 |
# description="Ask a question and get an answer based on the retrieved context. The response is generated using a GPU-accelerated model. Resource usage is logged at the end of generation."
|
959 |
# )
|
960 |
|
961 |
-
|
962 |
-
|
963 |
-
|
964 |
-
|
965 |
-
|
966 |
-
|
967 |
-
|
968 |
-
|
969 |
-
|
970 |
-
|
971 |
-
|
972 |
-
|
973 |
-
|
974 |
-
|
975 |
-
|
976 |
-
|
977 |
-
gr.HTML("Placeholder for FAQ type (merge as buttons on the above interface) - front end as prompt engineering for the first message to force direction of conversion")
|
978 |
-
|
979 |
|
980 |
-
gr.
|
981 |
-
|
982 |
-
|
983 |
-
|
984 |
-
|
985 |
-
|
986 |
-
with gr.Row():
|
987 |
-
with gr.Column():
|
988 |
-
llmguide_prompt = gr.Textbox(lines=2, placeholder="Enter your prompt here...")
|
989 |
-
llmguide_stream_checkbox = gr.Checkbox(label="Enable streaming")
|
990 |
-
llmguide_submit_button = gr.Button("Generate")
|
991 |
|
992 |
-
with gr.
|
993 |
-
|
994 |
-
|
995 |
-
|
996 |
-
|
997 |
-
|
998 |
-
|
999 |
-
|
1000 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
1001 |
|
1002 |
Frontendpromptengforguide = """Suggest descriptions for media to fill the empty media fields -
|
1003 |
|
|
|
302 |
|
303 |
#--------------------------------------------------------------------------------------------------------------------------------
|
304 |
|
305 |
+
# Prompt-engineering templates for the FAQ front end, grouped by category.
# Each category maps to a list of (prefix, suffix) pairs: the prefix is shown
# on a UI button and the pair brackets the user's input to steer the first
# message of the conversation (see the "General FAQ Attempt" tab, which builds
# one button per pair and wires it to llmguide_generate_response).
# NOTE(review): the suffix strings read awkwardly ("make 10 sub problems have
# to solve from this") — they are runtime prompt data, so left byte-identical.
FAQAllprompts = {
    "Brainstorming for this space": [
        ("My problem to solve is", "- please make 10 sub problems have to solve from this:"),
    ],
    "Expansive Problem solving": [
        ("My problem to solve is", "- please make 10 sub problems have to solve from this:"),
        ("My process to solve is", "- please make 10 sub processes have to solve from this:"),
        ("My goal to solve is", "- please make 10 sub goals have to solve from this:"),
        ("My task to solve is", "- please make 10 sub tasks have to solve from this:"),
        ("My phase to solve is", "- please make 10 sub phases have to solve from this:"),
        ("My component to solve is", "- please make 10 sub components have to solve from this:"),
        ("My element to solve is", "- please make 10 sub elements have to solve from this:"),
        ("A brief description of my current situation:", "- please list the most important task to pay attention to:"),
        ("A brief description of my current situation to analyse:", "- please conduct a situational analysis:"),
        ("A brief description of my current situation to decompose:", "- please conduct a problem decomposition:"),
    ],
}
|
322 |
+
|
323 |
+
#--------------------------------------------------------------------------------------------------------------------------------
|
324 |
# Default configuration template
|
325 |
default_config = {
|
326 |
'background': '/AutoGameBackgrounds/1stGameLoc123.png',
|
|
|
961 |
<div style="width: 20%; text-align: center">HF + Gradio allows for api use so this my prototype tool for tool use test</div>
|
962 |
</div>""")
|
963 |
with gr.Accordion("Qwen 0.5B as Space Guide Tests", open=False):
|
964 |
+
with gr.Tab("General FAQ Attempt"):
|
965 |
+
FAQMainOutput = gr.TextArea(placeholder='Output will show here')
|
966 |
+
FAQCustomButtonInput = gr.TextArea(lines=1, placeholder='Prompt goes here')
|
967 |
+
|
968 |
+
for category_name, category_prompts in FAQAllprompts.items():
|
969 |
+
with gr.Accordion(f"General {category_name} Pattern based", open=False):
|
970 |
+
with gr.Group():
|
971 |
+
for index, (prompt, _) in enumerate(category_prompts):
|
972 |
+
button = gr.Button(prompt)
|
973 |
+
button.click(llmguide_generate_response, inputs=[FAQCustomButtonInput, gr.State(index), gr.State(category_name)], outputs=FAQMainOutput)
|
974 |
+
|
975 |
+
with gr.Tab("General RAG (Pathfinder?) Attempt"):
|
976 |
+
gr.HTML("Placeholder for weak RAG Type - Explanations through an opensource embeddings engine")
|
977 |
# gr.Interface(
|
978 |
# fn=rag,
|
979 |
# inputs=[
|
|
|
989 |
# description="Ask a question and get an answer based on the retrieved context. The response is generated using a GPU-accelerated model. Resource usage is logged at the end of generation."
|
990 |
# )
|
991 |
|
992 |
+
gr.Interface(
|
993 |
+
fn=process_query,
|
994 |
+
inputs=[
|
995 |
+
gr.Textbox(lines=2, placeholder="Enter your question here..."),
|
996 |
+
gr.Checkbox(label="Use RAG"),
|
997 |
+
gr.Checkbox(label="Stream output")
|
998 |
+
],
|
999 |
+
outputs=[
|
1000 |
+
gr.Textbox(label="Generated Response"),
|
1001 |
+
gr.Textbox(label="Tokens per second"),
|
1002 |
+
gr.Textbox(label="RAM Usage"),
|
1003 |
+
gr.Textbox(label="Referenced Documents")
|
1004 |
+
],
|
1005 |
+
title="RAG/Non-RAG Q&A System",
|
1006 |
+
description="Ask a question with or without using RAG. The response is generated using a GPU-accelerated model. RAM usage and referenced document IDs (for RAG) are logged."
|
1007 |
+
)
|
|
|
|
|
1008 |
|
1009 |
+
with gr.Tab("Any Request to Qwen2-0.5B"):
|
1010 |
+
gr.HTML("Placeholder for https://huggingface.co/h2oai/h2o-danube3-500m-chat-GGUF as alternative")
|
1011 |
+
gr.HTML("Placeholder for qwen 2 72b as alternative use checkbox and gradio client api call")
|
1012 |
+
gr.Markdown("# Qwen-0.5B-Instruct Language Model")
|
1013 |
+
gr.Markdown("This demo uses the Qwen-0.5B-Instruct model to generate responses based on your input.")
|
1014 |
+
gr.HTML("Example prompts: <br>I am writing a story about a chef. please write dishes to appear on the menu. <br>What are the most common decisions that a chef story would include? <br>What are the kinds problems that a chef story would include? <br>What are the kinds of out of reach goals that a chef story would include? <br>Continue this config - Paste any complete block of the config")
|
|
|
|
|
|
|
|
|
|
|
1015 |
|
1016 |
+
with gr.Row():
|
1017 |
+
with gr.Column():
|
1018 |
+
llmguide_prompt = gr.Textbox(lines=2, placeholder="Enter your prompt here...")
|
1019 |
+
llmguide_stream_checkbox = gr.Checkbox(label="Enable streaming")
|
1020 |
+
llmguide_submit_button = gr.Button("Generate")
|
1021 |
+
|
1022 |
+
with gr.Column():
|
1023 |
+
llmguide_output = gr.Textbox(lines=10, label="Generated Response")
|
1024 |
+
llmguide_tokens_per_second = gr.Textbox(label="Tokens per Second")
|
1025 |
+
|
1026 |
+
llmguide_submit_button.click(
|
1027 |
+
llmguide_generate_response,
|
1028 |
+
inputs=[llmguide_prompt, llmguide_stream_checkbox],
|
1029 |
+
outputs=[llmguide_output, llmguide_tokens_per_second],
|
1030 |
+
)
|
1031 |
|
1032 |
Frontendpromptengforguide = """Suggest descriptions for media to fill the empty media fields -
|
1033 |
|