File size: 5,573 Bytes
1ac84a9 163bdcc 1ac84a9 56c2a68 1ac84a9 ed04595 66ec133 1ac84a9 f6fc806 ed04595 1ac84a9 fa530b9 f6fc806 4eb29a5 f6fc806 d98c79b ed04595 3cbea9b ed04595 9180e08 47f1619 ed04595 3c671c6 1ac84a9 4eb29a5 1ac84a9 3c671c6 ed04595 4eb29a5 ed04595 3c671c6 5ae861e 84a19ec 24cfdc1 84a19ec 1ac84a9 723d90c ed04595 84a19ec ed04595 5ae861e 1ac84a9 7f990d9 4eb29a5 7f990d9 f6fc806 84a19ec 7f990d9 1ac84a9 84a19ec 1ac84a9 ed04595 47f1619 ed04595 4eb29a5 ed04595 3c671c6 1ac84a9 f05ed48 3d7b342 f05ed48 1ac84a9 f05ed48 4487511 1ac84a9 5fcb857 cdc974d 5fcb857 4eb29a5 f05ed48 f6fc806 4eb29a5 52cceda f05ed48 20eb2c5 f05ed48 bf30b16 f05ed48 91b95bc f05ed48 5ae861e 2369fd8 f6fc806 73b2985 6f4788a f05ed48 6f4788a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 |
import gradio as gr
import openai
from openai import OpenAI
import google.generativeai as genai
import os
import io
import base64
# API key read from the environment; its format decides the backend:
# OpenAI keys start with "sk-", anything else is treated as a Google key
# (see endpoints() below).
api_key = os.environ.get("API_KEY")
#base_url = os.environ.get("OPENAI_API_BASE")
# Model identifier to request from the provider.
# NOTE(review): if the MODEL env var is unset this raises AttributeError at
# import time (os.environ.get returns None) — confirm deployment always sets it.
MODEL = os.environ.get("MODEL")
# Display name only: strip any "provider/" prefix (e.g. "models/gemini-pro").
MODEL_NAME = MODEL.split("/")[-1] if "/" in MODEL else MODEL
def read(filename):
    """Return the full contents of *filename* as a string.

    Reads with an explicit UTF-8 encoding: the prompt files contain Chinese
    text, and relying on the platform default encoding (e.g. cp936 on a
    Chinese Windows host, ASCII in some minimal containers) would corrupt
    the text or raise UnicodeDecodeError.

    :param filename: path of the text file to read.
    :return: file contents as one string.
    :raises OSError: if the file cannot be opened.
    """
    with open(filename, encoding="utf-8") as f:
        return f.read()
SYS_PROMPT = read('system_prompt.txt')
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">知觉demo</h1>
<p>🩺一个基于提示词和前沿多模态模型的AI,帮助您解读专业领域内容。</p>
<p>🔎 您可以选择领域,参考示例上传图像,或发送需要解读的文字内容。</p>
<p>🦕 生成解读内容仅供参考。</p>
</div>
'''
css = """
h1 {
text-align: center;
display: block;
}
footer {
display:none !important
}
"""
LICENSE = '采用 ' + MODEL_NAME + ' 模型'
def endpoints(api_key):
    """Classify an API key by provider.

    :param api_key: the configured key, or None when unset.
    :return: 'OPENAI' for keys with OpenAI's "sk-" prefix, 'GOOGLE' for any
        other non-None key, or None when no key is configured.

    The None return was previously an implicit fall-through; it is now an
    explicit guard clause so the contract is visible (behavior unchanged).
    """
    if api_key is None:
        return None
    return 'OPENAI' if api_key.startswith('sk-') else 'GOOGLE'
def process_text(text_input, unit):
    """Analyze *text_input* with the configured model, in domain *unit*.

    Routes to OpenAI or Google Generative AI depending on the API key
    format and returns the generated analysis text. Returns "" when there
    is no input or no recognized endpoint.
    """
    print(text_input)
    endpoint = endpoints(api_key)
    if not text_input:
        return ""
    # Same system framing for both providers.
    system_msg = f" You are a experienced Analyst in {unit}." + SYS_PROMPT
    if endpoint == 'OPENAI':
        client = OpenAI(api_key=api_key)
        completion = client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": system_msg},
                {"role": "user", "content": f"Hello! Could you analysis {text_input}?"},
            ],
        )
        return completion.choices[0].message.content
    if endpoint == 'GOOGLE':
        genai.configure(api_key=api_key)
        model = genai.GenerativeModel(model_name=MODEL)
        # Gemini has no separate system role here; prepend it to the prompt.
        response = model.generate_content(system_msg + f"Could you analysis {text_input}?")
        return response.text
    return ""
def encode_image_to_base64(image_input):
    """Serialize a PIL image to a base64 JPEG string (no "data:" prefix).

    :param image_input: object exposing PIL's ``save(buffer, format=...)``.
    :return: ASCII base64 string of the JPEG bytes.
    """
    jpeg_buffer = io.BytesIO()
    image_input.save(jpeg_buffer, format="JPEG")
    return base64.b64encode(jpeg_buffer.getvalue()).decode("utf-8")
def process_image(image_input, unit):
    """Analyze an uploaded image with the configured multimodal model.

    :param image_input: PIL image from the Gradio component, or None.
    :param unit: domain label injected into the system prompt.
    :return: the model's analysis text, or "" when there is no image or no
        recognized endpoint (previously these paths fell through and
        returned None, inconsistent with process_text()).
    """
    endpoint = endpoints(api_key)
    if image_input is None:
        return ""
    if endpoint == 'OPENAI':
        client = OpenAI(api_key=api_key)
        # The vision API takes images as base64 data URLs.
        base64_image = encode_image_to_base64(image_input)
        response = client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": f" You are a experienced Analyst in {unit}." + SYS_PROMPT},
                {"role": "user", "content": [
                    {"type": "text", "text": "Help me understand what is in this picture and analysis."},
                    {"type": "image_url",
                     "image_url": {
                         "url": f"data:image/jpeg;base64,{base64_image}",
                         "detail": "low"}  # "low" keeps token cost down for a demo
                     }
                ]}
            ],
            temperature=0.0,  # deterministic output for analysis
            max_tokens=1024,
        )
        return response.choices[0].message.content
    if endpoint == 'GOOGLE':
        print(image_input)
        genai.configure(api_key=api_key)
        model = genai.GenerativeModel(model_name=MODEL)
        prompt = f" You are a experienced Analyst in {unit}." + SYS_PROMPT + "Help me understand what is in this picture and analysis it."
        # 60 s cap so a stuck request doesn't hang the Gradio worker forever.
        response = model.generate_content([prompt, image_input], request_options={"timeout": 60})
        return response.text
    return ""
def main(text_input="", image_input=None, unit=""):
    """Dispatch a submission to text or image analysis.

    An uploaded image takes precedence over text; text alone goes to
    process_text(). With neither present, a Gradio error popup is shown.
    """
    if text_input and image_input is None:
        return process_text(text_input, unit)
    elif image_input is not None:
        return process_image(image_input, unit)
    else:
        # Bug fix: gr.Error must be *raised* to surface a popup in the UI.
        # The original merely constructed the exception object, so empty
        # submissions silently returned None with no user feedback.
        raise gr.Error("请输入内容或者上传图片")
# Click-to-fill examples for the [image_input, text_input] components below:
# three image-only samples and one text-only sample.
EXAMPLES = [
    ["./docs/estate.jpeg", "",],
    ["./docs/pop.jpeg", "",],
    ["./docs/debt.jpeg", "",],
    [None, "中国央行表示高度关注当前债券市场变化及潜在风险,必要时会进行卖出低风险债券包括国债操作",],
]
# --- Gradio UI assembly: banner, domain picker, output pane, inputs, actions.
with gr.Blocks(theme='shivi/calm_seafoam', css=css, title="知觉demo") as iface:
    with gr.Accordion(""):
        gr.Markdown(DESCRIPTION)
    # Domain selector; its value is passed to the prompts as `unit`.
    unit = gr.Dropdown(label="领域", value='财经', elem_id="units",
                       choices=["财经", "法律", "政治", "体育", "医疗",
                                "SEO", "评估", "科技", "交通", "行情"])
    with gr.Row():
        output_box = gr.Markdown(label="分析")  # Create an output textbox
    with gr.Row():
        image_input = gr.Image(type="pil", label="上传图片")  # Create an image upload button
        text_input = gr.Textbox(label="输入")  # Create a text input box
    with gr.Row():
        submit_btn = gr.Button("🚀 确认")  # Create a submit button
        clear_btn = gr.ClearButton([output_box, image_input, text_input], value="🗑️ 清空")  # Create a clear button
    # Set up the event listeners
    submit_btn.click(main, inputs=[text_input, image_input, unit], outputs=output_box)
    gr.Examples(examples=EXAMPLES, inputs=[image_input, text_input])
    gr.Markdown(LICENSE)
#gr.close_all()
# Launch the app; show_api=False hides the auto-generated API docs page.
iface.queue().launch(show_api=False)