sofiavalan commited on
Commit
b2d6a82
·
verified ·
1 Parent(s): f5814d9

Upload 7 files

Browse files
Files changed (7) hide show
  1. Dockerfile +42 -0
  2. README.md +195 -7
  3. __pycache__/app.cpython-313.pyc +0 -0
  4. app.py +80 -0
  5. chainlit.md +3 -0
  6. pyproject.toml +14 -0
  7. uv.lock +0 -0
Dockerfile ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Get a distribution that has uv already installed
FROM ghcr.io/astral-sh/uv:python3.13-bookworm-slim

# Build tools needed to compile any native / Rust-backed wheels during `uv sync`
USER root
RUN apt-get update && apt-get install -y \
    curl \
    build-essential \
    && rm -rf /var/lib/apt/lists/*

# Add user - this is the user that will run the app
# If you do not set user, the app will run as root (undesirable)
RUN useradd -m -u 1000 user
USER user

# Install the Rust toolchain for the app user. (Installing it a second time
# for root — as the original Dockerfile did — only bloats the image: the app
# runs as `user`, whose PATH never sees /root/.cargo/bin.)
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/home/user/.cargo/bin:${PATH}"

# Set the home directory and path
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# NOTE(review): presumably forces uvicorn onto the `websockets` implementation
# so Chainlit's websocket works behind the Spaces proxy — confirm if removed
ENV UVICORN_WS_PROTOCOL=websockets

# Set the working directory
WORKDIR $HOME/app

# Copy the app to the container
COPY --chown=user . $HOME/app

# Install the dependencies
# (prefer `uv sync --frozen` once uv.lock is committed, for reproducible builds)
RUN uv sync

# Expose the port
EXPOSE 7860

# Run the app
CMD ["uv", "run", "chainlit", "run", "app.py", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -1,12 +1,200 @@
1
  ---
2
- title: Llm App
3
- emoji: 🌖
4
- colorFrom: red
5
- colorTo: green
6
  sdk: docker
7
  pinned: false
8
- license: openrail
9
- short_description: llm-app
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: BeyondChatGPT Demo
3
+ emoji: 📉
4
+ colorFrom: pink
5
+ colorTo: yellow
6
  sdk: docker
7
  pinned: false
8
+ app_port: 7860
 
9
  ---
10
 
11
+ <p align = "center" draggable="false" ><img src="https://github.com/AI-Maker-Space/LLM-Dev-101/assets/37101144/d1343317-fa2f-41e1-8af1-1dbb18399719"
12
+ width="200px"
13
+ height="auto"/>
14
+ </p>
15
+
16
+
17
+ ## <h1 align="center" id="heading">:wave: Welcome to Beyond ChatGPT!!</h1>
18
+
19
+ For a step-by-step YouTube video walkthrough, watch this! [Deploying Chainlit app on Hugging Face](https://www.youtube.com/live/pRbbZcL0NMI?si=NAYhMZ_suAY84f06&t=2119)
20
+
21
+ ![Beyond ChatGPT: Build Your First LLM Application](https://github.com/AI-Maker-Space/Beyond-ChatGPT/assets/48775140/cb7a74b8-28af-4d12-a008-8f5a51d47b4c)
22
+
23
+ ## 🤖 Your First LLM App
24
+
25
+ > If you need an introduction to `git`, or information on how to set up API keys for the tools we'll be using in this repository - check out our [Interactive Dev Environment for LLM Development](https://github.com/AI-Maker-Space/Interactive-Dev-Environment-for-LLM-Development/tree/main) which has everything you'd need to get started in this repository!
26
+
27
+ In this repository, we'll walk you through the steps to create a Large Language Model (LLM) application using Chainlit, then containerize it using Docker, and finally deploy it on Huggingface Spaces.
28
+
29
+ Are you ready? Let's get started!
30
+
31
+ <details>
32
+ <summary>🖥️ Accessing "gpt-3.5-turbo" (ChatGPT) like a developer</summary>
33
+
34
+ 1. Head to [this notebook](https://colab.research.google.com/drive/1mOzbgf4a2SP5qQj33ZxTz2a01-5eXqk2?usp=sharing) and follow along with the instructions!
35
+
36
+ 2. Complete the notebook and try out your own system/assistant messages!
37
+
38
+ That's it! Head to the next step and start building your application!
39
+
40
+ </details>
41
+
42
+
43
+ <details>
44
+ <summary>🏗️ Building Your First LLM App</summary>
45
+
46
+ 1. Clone [this](https://github.com/AI-Maker-Space/Beyond-ChatGPT/tree/main) repo.
47
+
48
+ ``` bash
49
+ git clone https://github.com/AI-Maker-Space/Beyond-ChatGPT.git
50
+ ```
51
+
52
+ 2. Navigate inside this repo
53
+ ``` bash
54
+ cd Beyond-ChatGPT
55
+ ```
56
+
57
+ 3. Create a virtual environment and install dependencies.
58
+ ``` bash
59
+ # Create a virtual environment
60
+ uv venv
61
+
62
+ # Activate the virtual environment
63
+ # On macOS/Linux:
64
+ source .venv/bin/activate
65
+ # On Windows:
66
+ # .venv\Scripts\activate
67
+
68
+ # Install dependencies from pyproject.toml
69
+ uv sync
70
+ ```
71
+
72
+ 4. Open your `.env` file. Replace the `###` in your `.env` file with your OpenAI Key and save the file.
73
+ ``` bash
74
+ OPENAI_API_KEY=sk-###
75
+ ```
76
+
77
+ 5. Let's try deploying it locally. Make sure you're in the activated virtual environment. Run the app using Chainlit. This may take a minute to run.
78
+ ``` bash
79
+ uv run chainlit run app.py -w
80
+ ```
81
+
82
+ <p align = "center" draggable="false">
83
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/54bcccf9-12e2-4cef-ab53-585c1e2b0fb5">
84
+ </p>
85
+
86
+ Great work! Let's see if we can interact with our chatbot.
87
+
88
+ <p align = "center" draggable="false">
89
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/854e4435-1dee-438a-9146-7174b39f7c61">
90
+ </p>
91
+
92
+ Awesome! Time to throw it into a docker container and prepare it for shipping!
93
+ </details>
94
+
95
+
96
+
97
+ <details>
98
+ <summary>🐳 Containerizing our App</summary>
99
+
100
+ 1. Let's build the Docker image. We'll tag our image as `llm-app` using the `-t` parameter. The `.` at the end means we want all of the files in our current directory to be added to our image. Note that our Dockerfile is set up to use uv for dependency management and will install all the packages defined in our pyproject.toml file.
101
+
102
+ ``` bash
103
+ docker build -t llm-app .
104
+ ```
105
+
106
+ 2. Run and test the Docker image locally using the `run` command. The `-p` parameter connects our **host port #** to the left of the `:` to our **container port #** on the right.
107
+
108
+ ``` bash
109
+ docker run -p 7860:7860 llm-app
110
+ ```
111
+
112
+ 3. Visit http://localhost:7860 in your browser to see if the app runs correctly.
113
+
114
+ <p align = "center" draggable="false">
115
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/2c764f25-09a0-431b-8d28-32246e0ca1b7">
116
+ </p>
117
+
118
+ Great! Time to ship!
119
+ </details>
120
+
121
+
122
+ <details>
123
+ <summary>🚀 Deploying Your First LLM App</summary>
124
+
125
+ 1. Let's create a new Huggingface Space. Navigate to [Huggingface](https://huggingface.co) and click on your profile picture on the top right. Then click on `New Space`.
126
+
127
+ <p align = "center" draggable=”false”>
128
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/f0656408-28b8-4876-9887-8f0c4b882bae">
129
+ </p>
130
+
131
+ 2. Setup your space as shown below:
132
+
133
+ - Owner: Your username
134
+ - Space Name: `llm-app`
135
+ - License: `Openrail`
136
+ - Select the Space SDK: `Docker`
137
+ - Docker Template: `Blank`
138
+ - Space Hardware: `CPU basic - 2 vCPU - 16 GB - Free`
139
+ - Repo type: `Public`
140
+
141
+ <p align = "center" draggable=”false”>
142
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/8f16afd1-6b46-4d9f-b642-8fefe355c5c9">
143
+ </p>
144
+
145
+ 3. You should see something like this. We're now ready to send our files to our Huggingface Space. After cloning, move your files to this repo and push it along with your docker file. You DO NOT need to create a Dockerfile. Make sure NOT TO push your `.env` file. This should automatically be ignored.
146
+
147
+ <p align = "center" draggable=”false”>
148
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/cbf366e2-7613-4223-932a-72c67a73f9c6">
149
+ </p>
150
+
151
+ 4. After pushing all files, navigate to the settings in the top right to add your OpenAI API key.
152
+
153
+ <p align = "center" draggable=”false”>
154
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/a1123a6f-abdd-4f76-bea4-39acf9928762">
155
+ </p>
156
+
157
+ 5. Scroll down to `Variables and secrets` and click on `New secret` on the top right.
158
+
159
+ <p align = "center" draggable=”false”>
160
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/a8a4a25d-752b-4036-b572-93381370c2db">
161
+ </p>
162
+
163
+ 6. Set the name to `OPENAI_API_KEY` and add your OpenAI key under `Value`. Click save.
164
+
165
+ <p align = "center" draggable=”false”>
166
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/0a897538-1779-48ff-bcb4-486af30f7a14">
167
+ </p>
168
+
169
+ 7. To ensure your key is being used, we recommend you `Restart this Space`.
170
+
171
+ <p align = "center" draggable=”false”>
172
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/fb1d83af-6ebe-4676-8bf5-b6d88f07c583">
173
+ </p>
174
+
175
+ 8. Congratulations! You just deployed your first LLM! 🚀🚀🚀 Get on linkedin and post your results and experience! Make sure to tag us at #AIMakerspace !
176
+
177
+ Here's a template to get your post started!
178
+
179
+ ```
180
+ 🚀🎉 Exciting News! 🎉🚀
181
+
182
+ 🏗️ Today, I'm thrilled to announce that I've successfully built and shipped my first-ever LLM using the powerful combination of Chainlit, Docker, and the OpenAI API! 🖥️
183
+
184
+ Check it out 👇
185
+ [LINK TO APP]
186
+
187
+ A big shoutout to @**AI Makerspace** for making all of this possible. Couldn't have done it without the incredible community there. 🤗🙏
188
+
189
+ Looking forward to building with the community! 🙌✨ Here's to many more creations ahead! 🥂🎉
190
+
191
+ Who else is diving into the world of AI? Let's connect! 🌐💡
192
+
193
+ #FirstLLM #Chainlit #Docker #OpenAI #AIMakerspace
194
+ ```
195
+
196
+ </details>
197
+
198
+ <p></p>
199
+
200
+ ### That's it for now! And so it begins.... :)
__pycache__/app.cpython-313.pyc ADDED
Binary file (3.07 kB). View file
 
app.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# You can find this code for Chainlit python streaming here (https://docs.chainlit.io/concepts/streaming/python)

# OpenAI Chat completion
import os
from openai import AsyncOpenAI  # importing openai for API usage
import chainlit as cl  # importing chainlit for our app
from chainlit.prompt import Prompt, PromptMessage  # importing prompt tools
from chainlit.playground.providers import ChatOpenAI  # importing ChatOpenAI tools
from dotenv import load_dotenv

# Load OPENAI_API_KEY (and any other variables) from a local .env file into
# the process environment so the AsyncOpenAI client can pick the key up.
load_dotenv()

# ChatOpenAI Templates
# System prompt sent verbatim with every completion request.
system_template = """You are a helpful assistant who always speaks in a pleasant tone!
"""

# User prompt template; `{input}` is substituted with the incoming chat message.
user_template = """{input}
Think through your response step by step.
"""
+
21
+
22
@cl.on_chat_start  # marks a function that will be executed at the start of a user session
async def start_chat():
    """Store the default OpenAI chat-completion settings on the user session."""
    generation_config = dict(
        model="gpt-3.5-turbo",
        temperature=0,
        max_tokens=500,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )
    cl.user_session.set("settings", generation_config)
+
35
+
36
@cl.on_message  # marks a function that should be run each time the chatbot receives a message from a user
async def main(message: cl.Message):
    """Stream an OpenAI chat completion for the incoming message back to the UI."""
    settings = cl.user_session.get("settings")
    client = AsyncOpenAI()

    print(message.content)

    # Build the two-message prompt: static system message plus the user's
    # message rendered through the user template.
    system_message = PromptMessage(
        role="system",
        template=system_template,
        formatted=system_template,
    )
    user_message = PromptMessage(
        role="user",
        template=user_template,
        formatted=user_template.format(input=message.content),
    )
    prompt = Prompt(
        provider=ChatOpenAI.id,
        messages=[system_message, user_message],
        inputs={"input": message.content},
        settings=settings,
    )

    openai_messages = [m.to_openai() for m in prompt.messages]
    print(openai_messages)

    # Empty message that tokens are streamed into as they arrive.
    msg = cl.Message(content="")

    # Call OpenAI
    stream = await client.chat.completions.create(
        messages=openai_messages, stream=True, **settings
    )
    async for chunk in stream:
        # delta.content may be None on some chunks; stream an empty token then.
        await msg.stream_token(chunk.choices[0].delta.content or "")

    # Update the prompt object with the completion
    prompt.completion = msg.content
    msg.prompt = prompt

    # Send and close the message stream
    await msg.send()
chainlit.md ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # Beyond ChatGPT
2
+
3
+ This Chainlit app was created following instructions from [this repository](https://github.com/AI-Maker-Space/Beyond-ChatGPT)!
pyproject.toml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "beyond-chatgpt"
3
+ version = "0.1.0"
4
+ description = "A Chainlit chat application streaming OpenAI chat completions"
5
+ readme = "README.md"
6
+ requires-python = ">=3.13"
7
+ dependencies = [
8
+ "chainlit==0.7.700",
9
+ "cohere==4.37",
10
+ "openai==1.3.5",
11
+ "pydantic==2.10.1",
12
+ "python-dotenv==1.0.0",
13
+ "tiktoken==0.5.1",
14
+ ]
uv.lock ADDED
The diff for this file is too large to render. See raw diff