awacke1 committed on
Commit
cfb39dc
·
verified ·
1 Parent(s): 0328c8e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -68
app.py CHANGED
@@ -1,39 +1,37 @@
1
- import streamlit as st
2
- import streamlit.components.v1 as components
3
- import os
4
- import json
5
- import random
6
  import base64
 
7
  import glob
 
8
  import math
 
9
  import pytz
 
10
  import re
11
  import requests
 
 
 
12
  import time
13
  import zipfile
14
- import dotenv
15
- from gradio_client import Client
16
  from audio_recorder_streamlit import audio_recorder
17
  from bs4 import BeautifulSoup
18
  from collections import deque
19
  from datetime import datetime
20
  from dotenv import load_dotenv
 
21
  from huggingface_hub import InferenceClient
22
  from io import BytesIO
23
-
 
24
  from PyPDF2 import PdfReader
25
  from templates import bot_template, css, user_template
26
- from xml.etree import ElementTree as ET
27
- from PIL import Image
28
  from urllib.parse import quote # Ensure this import is included
 
29
 
30
  import openai
31
  from openai import OpenAI
32
 
33
- import cv2
34
- from moviepy.editor import VideoFileClip
35
- import textract
36
-
37
 
38
 
39
  # 1. Configuration
@@ -467,14 +465,6 @@ def FileSidebar():
467
  all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 10] # exclude files with short names
468
  all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by filename length which puts similar prompts together - consider making date and time of file optional.
469
 
470
-
471
-
472
- # Button to compare files and delete duplicates
473
- #if st.button("Compare and Delete Duplicates"):
474
- # compare_and_delete_files(all_files)
475
-
476
-
477
-
478
  # ⬇️ Download
479
  Files1, Files2 = st.sidebar.columns(2)
480
  with Files1:
@@ -489,7 +479,6 @@ def FileSidebar():
489
  file_contents=''
490
  file_name=''
491
  next_action=''
492
-
493
 
494
  # Add files 🌐View, 📂Open, ▶️Run, and 🗑Delete per file
495
  for file in all_files:
@@ -621,7 +610,6 @@ titles = [
621
  "🏰 Semantic 🧠 Soul 🙌 & Episodic 📜 Essence",
622
  "🥁🎻 The Music Of AI's Mind 🧠🎭🎉"
623
  ]
624
-
625
  selected_title = random.choice(titles)
626
  st.markdown(f"**{selected_title}**")
627
 
@@ -701,21 +689,10 @@ def load_score(key):
701
  # 🔍Search Glossary
702
  @st.cache_resource
703
  def search_glossary(query):
704
- #for category, terms in roleplaying_glossary.items():
705
- # if query.lower() in (term.lower() for term in terms):
706
- # st.markdown(f"#### {category}")
707
- # st.write(f"- {query}")
708
  all=""
709
  st.markdown(f"- {query}")
710
 
711
-
712
- # 🔍Run 1 - plain query
713
- #response = chat_with_model(query)
714
- #response1 = chat_with_model45(query)
715
- #all = query + ' ' + response1
716
- #st.write('🔍Run 1 is Complete.')
717
-
718
- # ArXiv searcher ~-<>-~ Paper Summary - Ask LLM
719
  client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
720
  response2 = client.predict(
721
  query, # str in 'parameter_13' Textbox component
@@ -738,42 +715,11 @@ def search_glossary(query):
738
  api_name="/update_with_rag_md"
739
  )
740
  st.write('🔍Run of Multi-Agent System Paper References is Complete')
741
- #st.markdown(response1)
742
-
743
  responseall = response2 + response1[0] + response1[1]
744
  st.markdown(responseall)
745
  return responseall
746
 
747
- # GPT 35 turbo and GPT 45 - - - - - - - - - - - - -<><><><><>:
748
- RunPostArxivLLM = False
749
- if RunPostArxivLLM:
750
- # 🔍Run PaperSummarizer
751
- PaperSummarizer = ' Create a paper summary as a markdown table with paper links clustering the features writing short markdown emoji outlines to extract three main ideas from each of the ten summaries. For each one create three simple points led by an emoji of the main three steps needed as method step process for implementing the idea as a single app.py streamlit python app. '
752
- response2 = chat_with_model(PaperSummarizer + str(response1))
753
- st.write('🔍Run 3 - Paper Summarizer is Complete.')
754
-
755
- # 🔍Run AppSpecifier
756
- AppSpecifier = ' Design and write a streamlit python code listing and specification that implements each scientific method steps as ten functions keeping specification in a markdown table in the function comments with original paper link to outline the AI pipeline ensemble implementing code as full plan to build.'
757
- response3 = chat_with_model(AppSpecifier + str(response2))
758
- st.write('🔍Run 4 - AppSpecifier is Complete.')
759
-
760
- # 🔍Run PythonAppCoder
761
- PythonAppCoder = ' Complete this streamlit python app implementing the functions in detail using appropriate python libraries and streamlit user interface elements. Show full code listing for the completed detail app as full code listing with no comments or commentary. '
762
- #result = str(result).replace('\n', ' ').replace('|', ' ')
763
- # response4 = chat_with_model45(PythonAppCoder + str(response3))
764
- response4 = chat_with_model(PythonAppCoder + str(response3))
765
- st.write('🔍Run Python AppCoder is Complete.')
766
-
767
- # experimental 45 - - - - - - - - - - - - -<><><><><>
768
-
769
- responseAll = '# Query: ' + query + '# Summary: ' + str(response2) + '# Streamlit App Specifier: ' + str(response3) + '# Complete Streamlit App: ' + str(response4) + '# Scholarly Article Links References: ' + str(response1)
770
- filename = generate_filename(responseAll, "md")
771
- create_file(filename, query, responseAll, should_save)
772
-
773
- return responseAll # 🔍Run--------------------------------------------------------
774
- else:
775
- return response1
776
-
777
  # Function to display the glossary in a structured format
778
  def display_glossary(glossary, area):
779
  if area in glossary:
 
 
 
 
 
 
1
  import base64
2
+ import cv2
3
  import glob
4
+ import json
5
  import math
6
+ import os
7
  import pytz
8
+ import random
9
  import re
10
  import requests
11
+ import streamlit as st
12
+ import streamlit.components.v1 as components
13
+ import textract
14
  import time
15
  import zipfile
16
+
 
17
  from audio_recorder_streamlit import audio_recorder
18
  from bs4 import BeautifulSoup
19
  from collections import deque
20
  from datetime import datetime
21
  from dotenv import load_dotenv
22
+ from gradio_client import Client
23
  from huggingface_hub import InferenceClient
24
  from io import BytesIO
25
+ from moviepy.editor import VideoFileClip
26
+ from PIL import Image
27
  from PyPDF2 import PdfReader
28
  from templates import bot_template, css, user_template
 
 
29
  from urllib.parse import quote # Ensure this import is included
30
+ from xml.etree import ElementTree as ET
31
 
32
  import openai
33
  from openai import OpenAI
34
 
 
 
 
 
35
 
36
 
37
  # 1. Configuration
 
465
  all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 10] # exclude files with short names
466
  all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by filename length which puts similar prompts together - consider making date and time of file optional.
467
 
 
 
 
 
 
 
 
 
468
  # ⬇️ Download
469
  Files1, Files2 = st.sidebar.columns(2)
470
  with Files1:
 
479
  file_contents=''
480
  file_name=''
481
  next_action=''
 
482
 
483
  # Add files 🌐View, 📂Open, ▶️Run, and 🗑Delete per file
484
  for file in all_files:
 
610
  "🏰 Semantic 🧠 Soul 🙌 & Episodic 📜 Essence",
611
  "🥁🎻 The Music Of AI's Mind 🧠🎭🎉"
612
  ]
 
613
  selected_title = random.choice(titles)
614
  st.markdown(f"**{selected_title}**")
615
 
 
689
  # 🔍Search Glossary
690
  @st.cache_resource
691
  def search_glossary(query):
 
 
 
 
692
  all=""
693
  st.markdown(f"- {query}")
694
 
695
+ # 🔍Run 1 - ArXiv RAG researcher expert ~-<>-~ Paper Summary & Ask LLM
 
 
 
 
 
 
 
696
  client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
697
  response2 = client.predict(
698
  query, # str in 'parameter_13' Textbox component
 
715
  api_name="/update_with_rag_md"
716
  )
717
  st.write('🔍Run of Multi-Agent System Paper References is Complete')
 
 
718
  responseall = response2 + response1[0] + response1[1]
719
  st.markdown(responseall)
720
  return responseall
721
 
722
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
723
  # Function to display the glossary in a structured format
724
  def display_glossary(glossary, area):
725
  if area in glossary: