Spaces: melihunsal committed
Commit · b5f6ee9
1 Parent(s): 0887a35
Add application file
Files changed:
- __init__.py +0 -0
- app.py +61 -0
- requirements.txt +4 -0
- templates.py +253 -0
__init__.py ADDED
File without changes
app.py ADDED
@@ -0,0 +1,61 @@
import streamlit as st
from time import sleep
import os
from templates import *

# Page title
title = '🦜🔗 DemoGPT'

st.set_page_config(page_title=title)
st.title(title)
st.markdown(
    """
    This is just to showcase the capabilities of DemoGPT.

    For custom applications, please visit the [DemoGPT repo](https://github.com/melih-unsal/DemoGPT).
    """
)
# Text input

openai_api_key = st.text_input('Enter your OpenAI API Key', placeholder='sk-...', type="password")
demo_title = st.text_input('Enter your demo title', placeholder='Type your demo title')

st.write("Examples")

cols1 = st.columns([1, 1, 1.2])
cols2 = st.columns([1.6, 1.5, 1])

pid = None

pressed = False

if 'current' not in st.session_state:
    st.session_state['current'] = ''
    st.session_state['done'] = None
elif st.session_state['done']:
    st.session_state['done'].empty()

# Example buttons: clicking one records the chosen demo for this rerun.
for col, example in zip(cols1, examples1):
    if col.button(example):
        st.session_state['current'] = example
        pressed = True

for col, example in zip(cols2, examples2):
    if col.button(example):
        st.session_state['current'] = example
        pressed = True

st.markdown('----')
if st.session_state['current']:
    with st.container():
        if not openai_api_key:
            st.warning('Please enter your OpenAI API Key', icon="⚠️")
        else:
            if pressed:
                wait()
            st.session_state['done'] = st.success('Done!')
            example2pages[st.session_state['current']](openai_api_key, demo_title)
st.markdown('----')
REPO_URL = "https://github.com/melih-unsal/DemoGPT"
st.markdown(f"The project [repo on GitHub]({REPO_URL}) is waiting for your :star:")
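A note on the control flow above: Streamlit reruns the whole script on every interaction, so app.py stores the clicked example in st.session_state and dispatches through the example2pages dict defined in templates.py. A minimal standalone sketch of that pattern (hypothetical labels and page functions, not part of this commit):

import streamlit as st

def page_a():
    st.write("rendering page A")

def page_b():
    st.write("rendering page B")

label2page = {"Page A": page_a, "Page B": page_b}  # analogous to example2pages

for label in label2page:
    if st.button(label):
        # A click triggers a rerun; persist the choice across reruns.
        st.session_state["current"] = label

current = st.session_state.get("current", "")
if current:
    label2page[current]()  # render whichever page was chosen, now or on an earlier run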
requirements.txt ADDED
@@ -0,0 +1,4 @@
streamlit
langchain
openai
tiktoken
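Note on the dependency list: versions are left unpinned. templates.py relies on `from langchain import LLMChain`, an import path that only resolves on early langchain releases (it was later deprecated and removed), so reproducing this Space today would likely require pinning an old langchain version; the exact version is not recorded in this commit.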
templates.py ADDED
@@ -0,0 +1,253 @@
def wait():
    import streamlit as st
    import time

    progress_texts = ["Generating Code...:pencil:", "Creating App...:running:", "Rendering the demo page...:tv:"]
    num_of_texts = len(progress_texts)
    progress_texts_iter = iter(progress_texts)
    my_bar = st.progress(0, "Initializing...")
    with st.spinner('Processing...'):
        start = end = 0
        for i in range(num_of_texts):
            text = next(progress_texts_iter)
            start = end
            end = start + 100 // num_of_texts
            for percent_complete in range(start, end):
                time.sleep(0.03 * (num_of_texts - i))  # later stages tick faster
                my_bar.progress(percent_complete + 1, text=text)
    my_bar.empty()

def language_translator(openai_api_key, demo_title="My Lang App"):
    import streamlit as st
    from langchain import LLMChain
    from langchain.chat_models import ChatOpenAI
    from langchain.prompts.chat import (
        ChatPromptTemplate,
        SystemMessagePromptTemplate,
        HumanMessagePromptTemplate,
    )

    def language_translator(input_language, output_language, text):
        chat = ChatOpenAI(openai_api_key=openai_api_key, temperature=0)

        template = "You are a helpful assistant that translates {input_language} to {output_language}. Please provide the text to translate."
        system_message_prompt = SystemMessagePromptTemplate.from_template(template)
        human_template = "{text}"
        human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
        chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])

        chain = LLMChain(llm=chat, prompt=chat_prompt)
        result = chain.run(input_language=input_language, output_language=output_language, text=text)
        return result

    st.header(demo_title)

    input_language = st.text_input("Input Language")
    output_language = st.text_input("Output Language")
    text = st.text_area("Text")

    if st.button("Translate"):
        result = language_translator(input_language, output_language, text)
        st.write(result)
        st.balloons()

def blog_post_generator(openai_api_key, demo_title="My Blogger"):
    import streamlit as st
    from langchain import LLMChain
    from langchain.chat_models import ChatOpenAI
    from langchain.prompts.chat import (
        ChatPromptTemplate,
        SystemMessagePromptTemplate,
        HumanMessagePromptTemplate,
    )

    def generate_blog_post(title):
        print("Generating blog post")
        chat = ChatOpenAI(openai_api_key=openai_api_key, temperature=0)

        template = "You are a helpful assistant that generates a blog post from the title: {title}. Please provide some content."
        system_message_prompt = SystemMessagePromptTemplate.from_template(template)
        human_template = "{text}"
        human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
        chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])

        chain = LLMChain(llm=chat, prompt=chat_prompt)
        result = chain.run(title=title, text="")
        return result

    st.header(demo_title)

    title = st.text_input("Enter the title of your blog post")
    if st.button("Generate Blog Post"):
        print("Generate")
        with st.spinner("Generating the blog post..."):
            result = generate_blog_post(title)
        st.write(result)
        st.balloons()

def grammer_corrector(openai_api_key, demo_title="My Grammarly"):
    import streamlit as st
    from langchain import LLMChain
    from langchain.chat_models import ChatOpenAI
    from langchain.prompts.chat import (
        ChatPromptTemplate,
        SystemMessagePromptTemplate,
        HumanMessagePromptTemplate,
    )

    def correct_grammar(text):
        chat = ChatOpenAI(openai_api_key=openai_api_key, temperature=0)

        template = "You are a helpful assistant that corrects grammar. Please provide the text you want to correct."
        system_message_prompt = SystemMessagePromptTemplate.from_template(template)
        human_template = "{text}"
        human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
        chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])

        chain = LLMChain(llm=chat, prompt=chat_prompt)
        result = chain.run(text=text)
        return result

    st.header(demo_title)

    text = st.text_input("Enter the text you want to correct")
    if st.button("Correct Grammar"):
        result = correct_grammar(text)
        st.write(result)
        st.balloons()

def lyrics_generator(openai_api_key, demo_title="Lyrics Maker"):
    import streamlit as st
    from langchain import LLMChain
    from langchain.chat_models import ChatOpenAI
    from langchain.prompts.chat import (
        ChatPromptTemplate,
        SystemMessagePromptTemplate,
        HumanMessagePromptTemplate,
    )

    def generate_song(title):
        chat = ChatOpenAI(openai_api_key=openai_api_key, temperature=0)

        template = "You are a helpful assistant that generates a song from the title: {title}. Please provide some lyrics."
        system_message_prompt = SystemMessagePromptTemplate.from_template(template)
        human_template = "{text}"
        human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
        chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])

        chain = LLMChain(llm=chat, prompt=chat_prompt)
        result = chain.run(title=title, text="")
        return result

    st.header(demo_title)

    title = st.text_input("Enter the song title:")
    if st.button("Generate Song"):
        with st.spinner("Generating song..."):
            result = generate_song(title)
        st.write(result)
        st.balloons()

def twit_generator(openai_api_key, demo_title="My AutoTwitter"):
    import streamlit as st
    from langchain import LLMChain
    from langchain.chat_models import ChatOpenAI
    from langchain.prompts.chat import (
        ChatPromptTemplate,
        SystemMessagePromptTemplate,
        HumanMessagePromptTemplate,
    )

    def twitter(hashtag):
        chat = ChatOpenAI(openai_api_key=openai_api_key, temperature=0.1)

        template = "You are a helpful assistant that generates a tweet from {hashtag}. Please provide the hashtag to generate a tweet."
        system_message_prompt = SystemMessagePromptTemplate.from_template(template)
        human_template = "Only generate the corresponding tweet for this hashtag {hashtag}"
        human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
        chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])

        chain = LLMChain(llm=chat, prompt=chat_prompt)
        result = chain.run(hashtag=hashtag)
        return result

    st.header(demo_title)

    hashtag = st.text_input("Hashtag", placeholder="#")

    if st.button("Generate"):
        result = twitter(hashtag)
        st.write(result)
        st.balloons()

def email_generator(openai_api_key, demo_title="My Email Generator"):
    import streamlit as st
    from langchain import LLMChain
    from langchain.chat_models import ChatOpenAI
    from langchain.prompts.chat import (
        ChatPromptTemplate,
        SystemMessagePromptTemplate,
        HumanMessagePromptTemplate,
    )

    def email(sender_name, receiver_name, purpose, keywords, tone):
        chat = ChatOpenAI(openai_api_key=openai_api_key, temperature=0.1)

        template = "You are a helpful assistant that generates an email to a person according to the given purpose, keywords and tone."
        system_message_prompt = SystemMessagePromptTemplate.from_template(template)
        human_template = """Generate an email for a person according to the given purpose, keywords and tone.
Sender Name:{sender_name}
Receiver Name:{receiver_name}
Purpose:{purpose}
Keywords:{keywords}
Tone:{tone}
Directly start to type the email.
"""
        human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
        chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])

        chain = LLMChain(llm=chat, prompt=chat_prompt)
        result = chain.run(sender_name=sender_name, receiver_name=receiver_name, purpose=purpose, keywords=keywords, tone=tone)
        return result

    st.header(demo_title)

    sender_name = st.text_input("Name of the sender")
    receiver_name = st.text_input("Name of the receiver")
    purpose = st.text_input("Purpose of the email")
    keywords = st.text_input("Primary keywords", placeholder="comma-separated list of keywords")
    tone = st.text_input("Tone of the email")

    if st.button("Generate"):
        with st.spinner("Generating email..."):
            result = email(sender_name, receiver_name, purpose, keywords, tone)
        st.write(result)
        st.balloons()

examples1 = [
    "Language Translator 🌐",
    "Grammar Corrector 📝",
    "Blog post generator from title 📖"
]

examples2 = [
    "Lyrics generator from song title 🎤",
    "Tweet generation from hashtag 🐦",
    'Email generator :email:'
]

examples = examples1 + examples2

pages1 = [language_translator, grammer_corrector, blog_post_generator]
pages2 = [lyrics_generator, twit_generator, email_generator]

pages = pages1 + pages2

# Map each example button label to the page function that renders it.
example2pages = {
    example: page
    for example, page in zip(examples, pages)
}

__all__ = ['language_translator', 'grammer_corrector', 'blog_post_generator', 'lyrics_generator', 'twit_generator',
           'example2pages', 'examples', 'examples1', 'examples2', 'wait']
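All six template functions repeat the same prompt-plus-chain boilerplate. A hedged refactoring sketch (not part of the commit; assumes the same legacy langchain imports the file already uses) showing the shared core they could delegate to:

from langchain import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)

def run_chat_chain(openai_api_key, system_template, human_template, temperature=0, **inputs):
    # Build the two-message prompt, bind it to the chat model, and run
    # the chain with the caller's template variables.
    chat = ChatOpenAI(openai_api_key=openai_api_key, temperature=temperature)
    chat_prompt = ChatPromptTemplate.from_messages([
        SystemMessagePromptTemplate.from_template(system_template),
        HumanMessagePromptTemplate.from_template(human_template),
    ])
    return LLMChain(llm=chat, prompt=chat_prompt).run(**inputs)

# Example: correct_grammar's body then collapses to a single call.
# result = run_chat_chain(openai_api_key,
#                         "You are a helpful assistant that corrects grammar. Please provide the text you want to correct.",
#                         "{text}", text=text)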