100stacks
committed
app: YACI app
Files changed:
- .chainlit/config.toml +84 -0
- Dockerfile +8 -6
- README.md +0 -2
- app-hw.py +7 -0
- app.py +81 -5
- requirements.txt +6 -2
.chainlit/config.toml
ADDED
@@ -0,0 +1,84 @@
+[project]
+# Whether to enable telemetry (default: true). No personal data is collected.
+enable_telemetry = false
+
+# List of environment variables to be provided by each user to use the app.
+user_env = []
+
+# Duration (in seconds) during which the session is saved when the connection is lost
+session_timeout = 3600
+
+# Enable third parties caching (e.g LangChain cache)
+cache = false
+
+# Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
+# follow_symlink = false
+
+[features]
+# Show the prompt playground
+prompt_playground = true
+
+# Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
+unsafe_allow_html = false
+
+# Process and display mathematical expressions. This can clash with "$" characters in messages.
+latex = false
+
+# Authorize users to upload files with messages
+multi_modal = true
+
+# Allows user to use speech to text
+[features.speech_to_text]
+enabled = false
+# See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
+# language = "en-US"
+
+[UI]
+# Name of the app and chatbot.
+name = "YACI"
+
+# Show the readme while the conversation is empty.
+show_readme_as_default = true
+
+# Description of the app and chatbot. This is used for HTML tags.
+# description = ""
+
+# Large size content are by default collapsed for a cleaner ui
+default_collapse_content = true
+
+# The default value for the expand messages settings.
+default_expand_messages = false
+
+# Hide the chain of thought details from the user in the UI.
+hide_cot = false
+
+# Link to your github repo. This will add a github button in the UI's header.
+github = "https://github.com/100stacks"
+
+# Specify a CSS file that can be used to customize the user interface.
+# The CSS file can be served from the public directory or via an external link.
+# custom_css = "/public/test.css"
+
+# Override default MUI light theme. (Check theme.ts)
+[UI.theme.light]
+#background = "#FAFAFA"
+#paper = "#FFFFFF"
+
+[UI.theme.light.primary]
+#main = "#F80061"
+#dark = "#980039"
+#light = "#FFE7EB"
+
+# Override default MUI dark theme. (Check theme.ts)
+[UI.theme.dark]
+#background = "#FAFAFA"
+#paper = "#FFFFFF"
+
+[UI.theme.dark.primary]
+#main = "#F80061"
+#dark = "#980039"
+#light = "#FFE7EB"
+
+
+[meta]
+generated_by = "0.7.700"
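This is the stock Chainlit 0.7.700 config with YACI-specific touches (name = "YACI", multi_modal = true, and the github link). Chainlit loads it automatically from .chainlit/config.toml in the working directory, so the settings can be checked locally, assuming the pinned chainlit CLI is installed and OPENAI_API_KEY is exported (or placed in a .env file, which app.py picks up via load_dotenv()), by running: chainlit run app.py -w (the -w flag reloads the app on file changes).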
Dockerfile
CHANGED
@@ -4,12 +4,14 @@ FROM python:3.9
 
 RUN useradd -m -u 1000 user
 USER user
-ENV
+ENV HOME=/home/user \
+    PATH="/home/user/.local/bin:$PATH"
 
-WORKDIR /app
+WORKDIR $HOME/app
 
-COPY --chown=user
-
+COPY --chown=user . $HOME/app
+COPY --chown=user ./requirements.txt ~/app/requirements.txt
+RUN pip install -r requirements.txt
+COPY . .
 
-
-CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
+CMD ["chainlit", "run", "app.py", "0.0.0.0", "--port", "7860"]
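Two details of the new Dockerfile are worth flagging. Docker generally does not expand ~ in COPY destinations, so the requirements copy likely intends $HOME/app/requirements.txt (and is redundant anyway, since the whole build context was already copied to $HOME/app and COPY . . copies it again). Also, the chainlit CLI takes the bind address as a flag rather than a positional argument, so the last line most likely needs to be:
CMD ["chainlit", "run", "app.py", "--host", "0.0.0.0", "--port", "7860"]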
README.md
CHANGED
@@ -8,5 +8,3 @@ pinned: false
 license: openrail
 short_description: YACI...Yet another chat interface. ☺️
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app-hw.py
ADDED
@@ -0,0 +1,7 @@
+from fastapi import FastAPI
+
+app = FastAPI()
+
+@app.get("/")
+def greet_json():
+    return {"Hello": "World from 🤗!"}
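app-hw.py looks like the stock FastAPI hello-world from the Space template, presumably kept for reference now that app.py hosts the Chainlit app. A quick way to exercise it without starting a server is FastAPI's TestClient; the sketch below is illustrative only, and the importlib indirection is needed solely because the hyphen in app-hw.py makes it unimportable by module name:

import importlib.util
from fastapi.testclient import TestClient  # needs httpx (or requests on older FastAPI)

# Load app-hw.py under a valid module name
spec = importlib.util.spec_from_file_location("app_hw", "app-hw.py")
app_hw = importlib.util.module_from_spec(spec)
spec.loader.exec_module(app_hw)

client = TestClient(app_hw.app)
print(client.get("/").json())  # expected: {'Hello': 'World from 🤗!'}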
app.py
CHANGED
@@ -1,7 +1,83 @@
-
+# YACI - Hugging Face Spaces
 
-
+# OpenAI chat completion
+import os
+from openai import AsyncOpenAI
+import chainlit as cl
+from chainlit.prompt import Prompt, PromptMessage  # prompt tools
+from chainlit.playground.providers import ChatOpenAI  # Chainlit's OpenAI tools
+from dotenv import load_dotenv
 
-
-
-
+load_dotenv()
+
+# Model config
+MODEL = "gpt-4o-mini-2024-07-18"
+MAX_TOKENS = 500
+TEMP = 0
+TOP_P = 1
+
+# ChatOpenAI
+system_template = """You are a helpful assistant who always speaks in a pleasant tone!"""
+
+user_template = """{input}
+Think through your response step by step.
+"""
+
+@cl.on_chat_start  # init user chat session
+async def start_chat():
+    settings = {
+        "model": MODEL,
+        "temperature": TEMP,
+        "max_tokens": MAX_TOKENS,
+        "top_p": TOP_P,
+        "frequency_penalty": 0,
+        "presence_penalty": 0,
+    }
+
+    cl.user_session.set("settings", settings)
+
+@cl.on_message  # runs each time the chatbot receives a message from a user
+async def main(message: cl.Message):
+    settings = cl.user_session.get("settings")
+
+    client = AsyncOpenAI()
+
+    print(message.content)
+
+    prompt = Prompt(
+        provider=ChatOpenAI.id,
+        messages=[
+            PromptMessage(
+                role="system",
+                template=system_template,
+                formatted=system_template,
+            ),
+            PromptMessage(
+                role="user",
+                template=user_template,
+                formatted=user_template.format(input=message.content),
+            ),
+        ],
+        inputs={"input": message.content},
+        settings=settings,
+    )
+
+    print([m.to_openai() for m in prompt.messages])
+
+    msg = cl.Message(content="")
+
+    # Call OpenAI
+    async for stream_resp in await client.chat.completions.create(
+        messages=[m.to_openai() for m in prompt.messages], stream=True, **settings
+    ):
+        token = stream_resp.choices[0].delta.content
+        if not token:
+            token = ""
+        await msg.stream_token(token)
+
+    # Update the prompt object with the completion
+    prompt.completion = msg.content
+    msg.prompt = prompt
+
+    # Send and close the message stream
+    await msg.send()
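The heart of the new app.py is the streaming loop: the chat completion is requested with stream=True and each delta token is forwarded to the Chainlit message as it arrives, with the prompt object attached afterwards so the exchange can be inspected in the prompt playground. The OpenAI side of that loop can be sanity-checked outside Chainlit; the sketch below reuses the model and settings from the diff, while the API key and the sample question are assumptions:

import asyncio
from openai import AsyncOpenAI

async def demo() -> None:
    client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
    stream = await client.chat.completions.create(
        model="gpt-4o-mini-2024-07-18",
        messages=[
            {"role": "system", "content": "You are a helpful assistant who always speaks in a pleasant tone!"},
            {"role": "user", "content": "What is 2 + 2?\nThink through your response step by step.\n"},
        ],
        temperature=0,
        max_tokens=500,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
        stream=True,
    )
    async for chunk in stream:
        # A delta can be None (e.g. the final chunk), mirroring the app's empty-token guard
        print(chunk.choices[0].delta.content or "", end="", flush=True)

asyncio.run(demo())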
requirements.txt
CHANGED
@@ -1,2 +1,6 @@
-
-
+chainlit==0.7.700
+cohere==4.37
+openai==1.3.5
+tiktoken==0.5.1
+python-dotenv==1.0.0
+pydantic==2.10.1
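The pins line up with the rest of the commit: chainlit==0.7.700 matches generated_by = "0.7.700" in .chainlit/config.toml, and openai==1.3.5 provides the AsyncOpenAI streaming client used in app.py. cohere and tiktoken are not imported anywhere in this commit, so they appear to be pinned for later use, and pydantic==2.10.1 is noticeably newer than the Chainlit release it sits beside, making it the first pin to revisit if the Space build fails on dependency resolution.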