Update app.py
app.py CHANGED
@@ -1602,7 +1602,7 @@ def process_text():
     )
     st.write("Assistant: " + completion.choices[0].message.content)
 
-def process_image(image_input):
+def process_image_old_05152024(image_input):
     if image_input:
         base64_image = base64.b64encode(image_input.read()).decode("utf-8")
         response = client.chat.completions.create(
@@ -1620,6 +1620,51 @@ def process_image(image_input):
         )
         st.markdown(response.choices[0].message.content)
 
+
+def save_image(image_input, image_response):
+    if image_input and image_response:
+        # Extract emojis and first two words from each markdown line
+        lines = image_response.split("\n")
+        filename_parts = []
+        for line in lines:
+            if line.startswith("- "):
+                emoji = re.search(r'(\p{Extended_Pictographic})', line)
+                words = re.findall(r'\b\w+\b', line)
+                if emoji and len(words) >= 2:
+                    filename_parts.append(f"{emoji.group(1)}_{words[1]}_{words[2]}")
+
+        # Create the filename by concatenating the extracted parts
+        filename = "_".join(filename_parts)[:50]  # Limit filename length to 50 characters
+        filename = f"{filename}.png"
+
+        # Save the image with the new filename
+        with open(filename, "wb") as f:
+            f.write(image_input.getbuffer())
+
+        return filename
+
+def process_image(image_input):
+    if image_input:
+        base64_image = base64.b64encode(image_input.read()).decode("utf-8")
+        response = client.chat.completions.create(
+            model=MODEL,
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
+                {"role": "user", "content": [
+                    {"type": "text", "text": "Help me understand what is in this picture and list ten facts as markdown outline with appropriate emojis that describes what you see."},
+                    {"type": "image_url", "image_url": {
+                        "url": f"data:image/png;base64,{base64_image}"}
+                    }
+                ]}
+            ],
+            temperature=0.0,
+        )
+        image_response = response.choices[0].message.content
+        st.markdown(image_response)
+
+        # Save the image with a new filename based on the response
+        save_image(image_input, image_response)
+
 def process_audio(audio_input):
     if audio_input:
         transcription = client.audio.transcriptions.create(
@@ -1741,14 +1786,10 @@ if __name__ == "__main__":
     main()
 
     display_glossary_grid(roleplaying_glossary)  # Word Glossary Jump Grid - Dynamically calculates columns based on details length to keep topic together
-
-
     num_columns_video=st.slider(key="num_columns_video", label="Choose Number of Video Columns", min_value=1, max_value=15, value=4)
     display_videos_and_links(num_columns_video)  # Video Jump Grid
-
    num_columns_images=st.slider(key="num_columns_images", label="Choose Number of Image Columns", min_value=1, max_value=15, value=4)
     display_images_and_wikipedia_summaries(num_columns_images)  # Image Jump Grid
-
     num_columns_text=st.slider(key="num_columns_text", label="Choose Number of Text Columns", min_value=1, max_value=15, value=4)
     display_buttons_with_scores(num_columns_text)  # Feedback Jump Grid
 
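Two caveats in the new save_image helper: Python's built-in re module does not support \p{...} Unicode property escapes, so re.search(r'(\p{Extended_Pictographic})', line) raises re.error ("bad escape \p") instead of matching an emoji, and the len(words) >= 2 guard is one short for the words[2] access that follows. A minimal corrected sketch, assuming the third-party regex package (which does resolve Unicode property classes) is installed; save_image_fixed is a hypothetical name, not part of the commit:

import re
import regex  # third-party (pip install regex); unlike re, it supports \p{...} escapes

def save_image_fixed(image_input, image_response):
    # Sketch only: same flow as the committed save_image, with two fixes.
    if not (image_input and image_response):
        return None
    filename_parts = []
    for line in image_response.split("\n"):
        if line.startswith("- "):
            # regex (not re) resolves the Extended_Pictographic property class
            emoji = regex.search(r'(\p{Extended_Pictographic})', line)
            words = re.findall(r'\b\w+\b', line)
            if emoji and len(words) >= 3:  # words[2] needs at least three words
                filename_parts.append(f"{emoji.group(1)}_{words[1]}_{words[2]}")
    if not filename_parts:
        return None  # nothing recognizable to name the file after
    filename = "_".join(filename_parts)[:50] + ".png"  # limit filename length
    with open(filename, "wb") as f:
        f.write(image_input.getbuffer())
    return filename

An alternative that avoids the extra dependency is an explicit emoji codepoint-range pattern, at the cost of tracking Unicode updates by hand.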
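A smaller detail in the new process_image: the data URL hardcodes image/png even when the uploader hands over a JPEG. A minimal sketch that derives the type from the upload instead; it assumes a Streamlit UploadedFile (whose .type attribute carries the browser-reported MIME type), and image_data_url is a hypothetical helper, not part of this commit:

import base64

def image_data_url(image_input):
    # UploadedFile.type is the MIME type reported by the browser, e.g. "image/jpeg"
    mime = image_input.type or "image/png"  # fall back to PNG if unreported
    b64 = base64.b64encode(image_input.getvalue()).decode("utf-8")
    return f"data:{mime};base64,{b64}"

The result drops into the message payload unchanged, e.g. {"type": "image_url", "image_url": {"url": image_data_url(image_input)}}; getvalue() also leaves the stream's read position alone, unlike the read() call in the committed code.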