Jaward committed (verified)
Commit 13cc517 · 1 Parent(s): 3a0cb61

Update app.py

Files changed (1): app.py (+5, -44)
app.py CHANGED
@@ -22,7 +22,6 @@ from autogen_ext.models.openai import OpenAIChatCompletionClient
  from autogen_ext.models.ollama import OllamaChatCompletionClient
  from autogen_ext.models.azure import AzureAIChatCompletionClient
  from azure.core.credentials import AzureKeyCredential
- from transformers import AutoModelForCausalLM, AutoTokenizer
  import traceback
  import soundfile as sf
  import tempfile
@@ -157,41 +156,6 @@ def html_with_progress(label, progress):
      </div>
      """

- # Qwen2.5-3B-Instruct Client
- class QwenChatCompletionClient:
-     def __init__(self, model_name="Qwen/Qwen2.5-3B-Instruct"):
-         self.model = AutoModelForCausalLM.from_pretrained(
-             model_name,
-             torch_dtype="auto",
-             device_map="auto"
-         )
-         self.tokenizer = AutoTokenizer.from_pretrained(model_name)
-         logger.info(f"Initialized Qwen model: {model_name}")
-
-     def create_chat_completion(self, messages, max_tokens=512, temperature=0.7, top_p=0.9):
-         try:
-             text = self.tokenizer.apply_chat_template(
-                 messages,
-                 tokenize=False,
-                 add_generation_prompt=True
-             )
-             model_inputs = self.tokenizer([text], return_tensors="pt").to(self.model.device)
-
-             generated_ids = self.model.generate(
-                 **model_inputs,
-                 max_new_tokens=max_tokens,
-                 temperature=temperature,
-                 top_p=top_p
-             )
-             generated_ids = [
-                 output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
-             ]
-             response = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
-             return {"choices": [{"message": {"content": response}}]}
-         except Exception as e:
-             logger.error(f"Error in Qwen chat completion: {str(e)}")
-             raise
-
  # Get model client based on selected service
  def get_model_client(service, api_key):
      if service == "OpenAI-gpt-4o-2024-08-06":
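For reference, a minimal sketch of how the client removed above would have been driven on its own (illustrative only and not part of this commit; it assumes the class definition above, plus transformers and torch installed with enough memory for the 3B checkpoint):

    # Hypothetical standalone use of the removed QwenChatCompletionClient
    client = QwenChatCompletionClient()  # loads Qwen/Qwen2.5-3B-Instruct via from_pretrained
    result = client.create_chat_completion(
        messages=[
            {"role": "system", "content": "You are a concise lecturer."},
            {"role": "user", "content": "Explain backpropagation in one sentence."},
        ],
        max_tokens=128,
    )
    print(result["choices"][0]["message"]["content"])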
@@ -215,8 +179,6 @@ def get_model_client(service, api_key):
                  "structured_output": False,
              }
          )
-     elif service == "Qwen-Qwen2.5-3B-Instruct":
-         return QwenChatCompletionClient()
      else:
          raise ValueError("Invalid service")

@@ -730,9 +692,9 @@ Example: 'Received {total_slides} slides, {total_slides} scripts, and HTML files
      logger.error("Expected %d slides, but received %d", total_slides, len(slides))
      yield (
          f"""
-         <div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1猛
-         <h2 style="color: #d9534f;">Incorrect number of slides</h2>
-         <p style="margin-top: 20px;">Expected {total_slides} slides, but generated {len(slides)}. Please try again.</p>
+         <div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
+         <h2 style="color: #d9534f;">Incorrect number of slides</h2>
+         <p style="margin-top: 20px;">Expected {total_slides} slides, but generated {len(slides)}. Please try again.</p>
          </div>
          """,
          []
@@ -1269,7 +1231,7 @@ with gr.Blocks(
      h2 {text-align: center}
      #lecture-container {font-family: 'Times New Roman', Times, serif;}
      #slide-content {font-size: 48px; line-height: 1.2;}
-     #form-group {box-shadow: 0 0 2rem Ndarray(0, 0, 0, .14) !important; border-radius: 30px; font-weight: 900; color: #000; background-color: white;}
+     #form-group {box-shadow: 0 0 2rem rgba(0, 0, 0, .14) !important; border-radius: 30px; font-weight: 900; color: #000; background-color: white;}
      #download {box-shadow: 0 0 2rem rgba(0, 0, 0, .14) !important; border-radius: 30px;}
      #slide-display {box-shadow: 0 0 2rem rgba(0, 0, 0, .14) !important; border-radius: 30px; background-color: white;}
      button {transition: background-color 0.3s;}
@@ -1294,12 +1256,11 @@ with gr.Blocks(
              "Anthropic-claude-3-sonnet-20240229",
              "Google-gemini-1.5-flash",
              "Ollama-llama3.2",
-             "Qwen-Qwen2.5-3B-Instruct",
          ],
          label="Model",
          value="Google-gemini-1.5-flash"
      )
-     api_key = gr.Textbox(label="Model Provider API Key", type="password", placeholder="Not required for Ollama, Qwen, or Azure AI Foundry (use GITHUB_TOKEN env var)")
+     api_key = gr.Textbox(label="Model Provider API Key", type="password", placeholder="Not required for Ollama or Azure AI Foundry (use GITHUB_TOKEN env var)")
      serpapi_key = gr.Textbox(label="SerpApi Key (For Research Agent)", type="password", placeholder="Enter your SerpApi key (optional)")
      num_slides = gr.Slider(1, 20, step=1, label="Number of Lecture Slides (will add intro and closing slides)", value=3)
      speaker_audio = gr.Audio(value="feynman.mp3", label="Speaker sample speech (MP3 or WAV)", type="filepath", elem_id="speaker-audio")
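After this change, every value left in the Model dropdown maps one-to-one onto a branch of get_model_client, and any other string falls through to the ValueError branch. A minimal sketch of that wiring (the variable names and the empty api_key are illustrative, not taken from app.py):

    # Hypothetical call site for the factory shown in the hunks above
    service = "Ollama-llama3.2"                      # any value still listed in the dropdown
    client = get_model_client(service, api_key="")   # Ollama needs no provider key, per the placeholder text
    # get_model_client("Qwen-Qwen2.5-3B-Instruct", "") now raises ValueError("Invalid service")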
 