ggruiz-amd committed
Commit 0d87b57 · 1 Parent(s): c704c9f

add all files for qwen


no new max tokens

demo

remove max

test

remove dialog

Files changed (5)
  1. .gitignore +103 -0
  2. app.py +129 -0
  3. gateway.py +128 -0
  4. requirements.txt +4 -0
  5. style.css +10 -0
.gitignore ADDED
@@ -0,0 +1,103 @@
+ # Python build
+ .eggs/
+ gradio.egg-info
+ dist/
+ dist-lite/
+ *.pyc
+ __pycache__/
+ *.py[cod]
+ *$py.class
+ build/
+ !js/build/
+ !js/build/dist/
+ __tmp/*
+ *.pyi
+ !gradio/stubs/**/*.pyi
+ .ipynb_checkpoints/
+ .python-version
+ =23.2
+
+ # JS build
+ gradio/templates/*
+ gradio/node/*
+ gradio/_frontend_code/*
+ js/gradio-preview/test/*
+
+ # Secrets
+ .env
+
+ # Gradio run artifacts
+ *.db
+ *.sqlite3
+ gradio/launches.json
+ gradio/hash_seed.txt
+ .gradio/
+
+ tmp.zip
+
+ # Tests
+ .coverage
+ coverage.xml
+ test.txt
+ **/snapshots/**/*.png
+ playwright-report/
+ .hypothesis
+ .lite-perf.json
+
+ # Demos
+ demo/tmp.zip
+ demo/files/*.avi
+ demo/files/*.mp4
+ demo/all_demos/demos/*
+ demo/all_demos/requirements.txt
+ demo/*/config.json
+ demo/annotatedimage_component/*.png
+ demo/fake_diffusion_with_gif/*.gif
+ demo/cancel_events/cancel_events_output_log.txt
+ demo/unload_event_test/output_log.txt
+ demo/stream_video_out/output_*.ts
+ demo/stream_video_out/output_*.mp4
+ demo/stream_audio_out/*.mp3
+ #demo/image_editor_story/*.png
+
+ # Etc
+ .idea/*
+ .DS_Store
+ *.bak
+ workspace.code-workspace
+ *.h5
+
+ # dev containers
+ .pnpm-store/
+
+ # log files
+ .pnpm-debug.log
+
+ # Local virtualenv for devs
+ .venv*
+
+ # FRP
+ gradio/frpc_*
+ .vercel
+
+ # js
+ node_modules
+ public/build/
+ test-results
+ client/js/dist/*
+ client/js/test.js
+ .config/test.py
+ .svelte-kit
+
+
+ # storybook
+ storybook-static
+ build-storybook.log
+ js/storybook/theme.css
+ #js/storybook/public/output-image.png
+
+ # playwright
+ .config/playwright/.cache
+
+ # VSCode
+ .lh
app.py ADDED
@@ -0,0 +1,129 @@
+ import os
+ import logging
+ import gradio as gr
+ from typing import Iterator
+ from gateway import request_generation
+
+ # Set up logging
+ logging.basicConfig(level=logging.INFO)
+
+ # Validate environment variables
+ CLOUD_GATEWAY_API = os.getenv("API_ENDPOINT")
+ if not CLOUD_GATEWAY_API:
+     raise EnvironmentError("API_ENDPOINT is not set.")
+
+ MODEL_NAME = os.getenv("MODEL_NAME")
+ if not MODEL_NAME:
+     raise EnvironmentError("MODEL_NAME is not set.")
+
+ API_KEY = os.getenv("API_KEY")
+ if not API_KEY:
+     raise EnvironmentError("API_KEY is not set.")
+
+ # Build the auth header once instead of redeclaring it per request
+ HEADER = {"x-api-key": API_KEY}
+
+ def generate(
+     message: str,
+     chat_history: list,
+     system_prompt: str,
+     temperature: float = 0.6,
+     frequency_penalty: float = 0.0,
+     presence_penalty: float = 0.0,
+ ) -> Iterator[str]:
+     """Send a request to the backend, fetch the streaming response, and emit it to the UI.
+
+     Args:
+         message (str): input message from the user
+         chat_history (list): entire chat history of the session (managed by Gradio;
+             the backend tracks the conversation server-side)
+         system_prompt (str): system prompt
+         temperature (float, optional): value used to modulate the next-token
+             probabilities. Defaults to 0.6.
+         frequency_penalty (float, optional): penalizes tokens in proportion to how
+             often they already appear in the text. 0.0 means no penalty. Defaults to 0.0.
+         presence_penalty (float, optional): penalizes tokens that have appeared at
+             least once so far. 0.0 means no penalty. Defaults to 0.0.
+
+     Yields:
+         Iterator[str]: streaming responses to the UI
+     """
+     # Accumulate streamed chunks and yield the running text so the UI updates in place
+     outputs = []
+     for text in request_generation(
+         header=HEADER,
+         message=message,
+         system_prompt=system_prompt,
+         temperature=temperature,
+         presence_penalty=presence_penalty,
+         frequency_penalty=frequency_penalty,
+         cloud_gateway_api=CLOUD_GATEWAY_API,
+         model_name=MODEL_NAME,
+     ):
+         outputs.append(text)
+         yield "".join(outputs)
+
+
+ description = """
+ This Space is an alpha release demonstrating the [Qwen3-30B-A3B](https://huggingface.co/Qwen/Qwen3-30B-A3B) model running on AMD MI300 infrastructure. Use of the model is governed by the Qwen 3 [license](https://huggingface.co/Qwen/Qwen3-30B-A3B/blob/main/LICENSE). Feel free to play with it!
+ """
+
+ demo = gr.ChatInterface(
+     fn=generate,
+     type="messages",
+     chatbot=gr.Chatbot(
+         type="messages",
+         scale=2,
+         allow_tags=True,
+     ),
+     stop_btn=None,
+     additional_inputs=[
+         gr.Textbox(
+             label="System prompt",
+             value="You are a highly capable AI assistant. Provide accurate, concise, and fact-based responses that are directly relevant to the user's query. Avoid speculation, ensure logical consistency, and maintain clarity in longer outputs. Keep answers well-structured and under 1200 tokens unless explicitly requested otherwise.",
+             lines=3,
+         ),
+         gr.Slider(
+             label="Temperature",
+             minimum=0.1,
+             maximum=4.0,
+             step=0.1,
+             value=0.3,
+         ),
+         gr.Slider(
+             label="Frequency penalty",
+             minimum=-2.0,
+             maximum=2.0,
+             step=0.1,
+             value=0.0,
+         ),
+         gr.Slider(
+             label="Presence penalty",
+             minimum=-2.0,
+             maximum=2.0,
+             step=0.1,
+             value=0.0,
+         ),
+     ],
+     examples=[
+         ["Plan a three-day trip to Washington DC for Cherry Blossom Festival."],
+         [
+             "Compose a short, joyful musical piece for kids celebrating spring sunshine and blossom."
+         ],
+         ["Can you explain briefly to me what is the Python programming language?"],
+         ["Explain the plot of Cinderella in a sentence."],
+         ["How many hours does it take a man to eat a Helicopter?"],
+         ["Write a 100-word article on 'Benefits of Open-Source in AI research'."],
+     ],
+     cache_examples=False,
+     title="Qwen3-30B-A3B",
+     description=description,
+ )
+
+
+ if __name__ == "__main__":
+     demo.queue(
+         # Fallback values are assumptions; int(None) would raise if the vars were unset
+         max_size=int(os.getenv("QUEUE", "10")),
+         default_concurrency_limit=int(os.getenv("CONCURRENCY_LIMIT", "1")),
+     ).launch()
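
Note: for local testing, app.py can be exercised by setting the five environment variables it reads before importing it. A minimal sketch, assuming a hypothetical gateway URL and key (QUEUE and CONCURRENCY_LIMIT must parse as integers; the trailing slash on API_ENDPOINT matters because gateway.py concatenates paths onto it):

    # run_local.py - sketch only; all values below are placeholders
    import os

    os.environ["API_ENDPOINT"] = "https://gateway.example.com/api/v1/"  # hypothetical endpoint
    os.environ["MODEL_NAME"] = "Qwen/Qwen3-30B-A3B"
    os.environ["API_KEY"] = "replace-with-a-real-key"                   # hypothetical key
    os.environ["QUEUE"] = "10"
    os.environ["CONCURRENCY_LIMIT"] = "2"

    import app  # builds the ChatInterface; the __main__ guard does not fire on import

    app.demo.queue(
        max_size=int(os.environ["QUEUE"]),
        default_concurrency_limit=int(os.environ["CONCURRENCY_LIMIT"]),
    ).launch()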
gateway.py ADDED
@@ -0,0 +1,128 @@
+ import json
+ import logging
+ import requests
+ import urllib3
+
+ # Requests below use verify=False; suppress the associated InsecureRequestWarning
+ urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
+ # Set up logging
+ logging.basicConfig(level=logging.INFO)
+
+ def check_server_health(cloud_gateway_api: str, header: dict) -> bool:
+     """
+     Use the appropriate API endpoint to check the server health.
+     Args:
+         cloud_gateway_api: API endpoint to probe.
+         header: header carrying the authorization key.
+
+     Returns:
+         True if the server is active, False otherwise.
+     """
+     try:
+         response = requests.get(
+             cloud_gateway_api + "model/info",
+             headers=header,
+             verify=False,
+         )
+         response.raise_for_status()
+         return True
+     except requests.RequestException as e:
+         logging.error(f"Failed to check server health: {e}")
+         return False
+
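
Note: check_server_health is not called from app.py in this commit; it can serve as a pre-flight probe. A minimal sketch, assuming hypothetical endpoint and key values:

    # Sketch only; endpoint and key are placeholders
    header = {"x-api-key": "replace-with-a-real-key"}
    api = "https://gateway.example.com/api/v1/"  # trailing slash required: paths are concatenated

    if check_server_health(cloud_gateway_api=api, header=header):
        print("Gateway is reachable.")
    else:
        print("Gateway is down; skipping generation.")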
+
+ def request_generation(
+     header: dict,
+     message: str,
+     system_prompt: str,
+     cloud_gateway_api: str,
+     model_name: str,
+     temperature: float = 0.3,
+     frequency_penalty: float = 0.0,
+     presence_penalty: float = 0.0,
+ ):
+     """
+     Request streaming generation from the cloud gateway API, using requests with
+     stream=True to surface token-by-token output from the LLM.
+
+     Args:
+         header: authorization header for the API.
+         message: prompt from the user.
+         system_prompt: system prompt to prepend.
+         cloud_gateway_api (str): API endpoint to send the request.
+         model_name (str): name of the model to route the request to.
+         temperature: value used to modulate the next-token probabilities.
+         frequency_penalty: penalizes tokens in proportion to their frequency so far.
+             0.0 means no penalty.
+         presence_penalty: penalizes tokens that have already appeared. 0.0 means
+             no penalty.
+
+     Yields:
+         str: content chunks of the streamed model response.
+     """
+
+     payload = {
+         "model": model_name,
+         "messages": [
+             {"role": "system", "content": system_prompt},
+             {"role": "user", "content": message},
+         ],
+         "temperature": temperature,
+         "frequency_penalty": frequency_penalty,
+         "presence_penalty": presence_penalty,
+         "stream": True,  # Enable streaming
+         "serving_runtime": "vllm",
+     }
+
+     try:
+         # Step 1: open a conversation; the gateway responds with a conversation ID
+         response = requests.post(
+             cloud_gateway_api + "chat/conversation",
+             headers=header,
+             json=payload,
+             verify=False,
+         )
+         response.raise_for_status()
+
+         # Append the conversation ID to the header under the key X-Conversation-ID
+         header["X-Conversation-ID"] = response.json()["conversationId"]
+
+         # Step 2: stream the response for that conversation
+         with requests.get(
+             cloud_gateway_api + "conversation/stream",
+             headers=header,
+             verify=False,
+             stream=True,
+         ) as response:
+             for chunk in response.iter_lines():
+                 if chunk:
+                     # Convert the chunk from bytes to a string
+                     chunk_str = chunk.decode("utf-8")
+
+                     # Strip the SSE "data: " prefix (it can occur doubled)
+                     for _ in range(2):
+                         if chunk_str.startswith("data: "):
+                             chunk_str = chunk_str[len("data: ") :]
+
+                     # The stream signals completion with a [DONE] sentinel
+                     if chunk_str.strip() == "[DONE]":
+                         break
+
+                     # Parse the chunk as a JSON object
+                     try:
+                         chunk_json = json.loads(chunk_str)
+
+                         # Extract the "content" field from the first choice's delta
+                         if "choices" in chunk_json and chunk_json["choices"]:
+                             content = chunk_json["choices"][0]["delta"].get(
+                                 "content", ""
+                             )
+                         else:
+                             content = ""
+
+                         # Yield the generated content as it streams in
+                         if content:
+                             yield content
+                     except json.JSONDecodeError:
+                         # Skip chunks that are not valid JSON
+                         continue
+     except requests.RequestException as e:
+         logging.error(f"Failed to generate response: {e}")
+         yield "Server not responding. Please try again later."
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ numpy
+ pillow
+ fastapi
+ websockets
style.css ADDED
@@ -0,0 +1,10 @@
+ h1 {
+     text-align: center;
+     display: block;
+ }
+
+ .contain {
+     max-width: 900px;
+     margin: auto;
+     padding-top: 1.5rem;
+ }