Spaces:
Running
Running
| import base64 | |
| import gradio as gr | |
| import json | |
| import mimetypes # Used in MiniMax template for base64 encoding, though not directly in my code for now | |
| import os | |
| import requests # MiniMax template uses requests for its API calls | |
| import time | |
| import re # For regex to extract code blocks | |
| import threading # For running agent asynchronously | |
| # Import modelscope_studio components | |
| import modelscope_studio.components.antd as antd | |
| import modelscope_studio.components.antdx as antdx | |
| import modelscope_studio.components.base as ms | |
| import modelscope_studio.components.pro as pro # pro.Chatbot etc. | |
| from modelscope_studio.components.pro.chatbot import ( | |
| ChatbotActionConfig, ChatbotBotConfig, ChatbotMarkdownConfig, | |
| ChatbotPromptsConfig, ChatbotUserConfig, ChatbotWelcomeConfig | |
| ) | |
| # Your existing smolagents imports | |
| from run import create_agent, run_agent_with_streaming | |
| from dotenv import load_dotenv | |
load_dotenv()  # pull API keys / endpoints from a local .env into os.environ

# File where user-entered configuration from the UI is persisted between runs.
CONFIG_FILE = ".user_config.env"

# --- Constants and Helper Functions from MiniMaxAI template ---
# (Adapt paths and values as per your project structure)

# Example prompts rendered as clickable cards in the Code Playground tab,
# grouped by category: {category name: [{title, description}, ...]}.
# Dummy EXAMPLES and DEFAULT_PROMPTS for the Code Playground (replace with your actual data)
EXAMPLES = {
    "UI Components": [
        {"title": "Simple Button", "description": "Generate a simple HTML button with hover effect."},
        {"title": "Responsive Nav Bar", "description": "Create a responsive navigation bar using HTML and CSS."},
    ],
    "Games & Visualizations": [
        {"title": "Maze Generator and Pathfinding Visualizer", "description": "Create a maze generator and pathfinding visualizer. Randomly generate a maze and visualize A* algorithm solving it step by step. Use canvas and animations. Make it visually appealing."},
        {"title": "Particle Explosion Effect", "description": "Implement a particle explosion effect when the user clicks anywhere on the page."},
    ],
    "Interactive Apps": [
        {"title": "Typing Speed Game", "description": "Build a typing speed test web app. Randomly show a sentence, and track the user's typing speed in WPM (words per minute). Provide live feedback with colors and accuracy."},
        {"title": "Simple Calculator", "description": "Generate a basic four-function calculator with a user-friendly interface."},
    ],
}

# System prompt installed on the agent for the Code Playground: instructs the
# model to emit one self-contained HTML document inside a ```html fence.
# The SYSTEM_PROMPT for code generation, now as a constant
SYSTEM_PROMPT_CODE_GEN = """
You are an expert web developer. Your task is to write a complete, single HTML file
(including all necessary CSS and JavaScript within <style> and <script> tags, or as data URIs for images if any)
that directly solves the user's request.
- Do NOT use external stylesheets or scripts, unless explicitly requested and only if absolutely necessary (e.g., a CDN for a well-known library).
- Your output MUST be a complete HTML document, enclosed in ```html ... ``` code block.
- For interactive elements, use pure JavaScript or standard libraries.
- If the user asks for a simple visualization, use HTML, CSS, and SVG or Canvas.
- Ensure the HTML is self-contained and ready to be rendered in an iframe.
- Provide a brief reasoning *before* the code block, explaining your approach.
"""

# Suggested starter prompts for the Chatbot tab (if your chatbot uses them).
DEFAULT_PROMPTS = [
    {"description": "What is the capital of France?"},
    {"description": "Explain quantum entanglement in simple terms."},
    {"description": "Write a short story about a brave knight."},
]
| # --- Helper Functions from MiniMaxAI Template (adapted for your app) --- | |
def remove_code_block(text):
    """
    Extract the content of the first Markdown fenced code block from *text*.

    Tries ```html/```HTML fences first, then fences with any (or no)
    language marker, then bare inline ``` pairs. If no fence is found,
    returns the stripped text as-is (it may already be raw HTML, which is
    detected via its doctype/<html> prefix).

    Args:
        text: Model output that may contain a fenced code block.

    Returns:
        The extracted (stripped) code block content, or the stripped
        original text when no fence is present.
    """
    patterns = [
        r'```(?:html|HTML)\n([\s\S]+?)\n```',   # ```html / ```HTML fences
        # FIX: allow an optional language marker here. The original pattern
        # (r'```\n...') only matched fences with NO language tag, so e.g.
        # ```python fences fell through to the last pattern, which captured
        # the "python" marker into the result.
        r'```[a-zA-Z]*\n([\s\S]+?)\n```',       # any-language or bare fences
        r'```([\s\S]+?)```'                      # inline fences (last resort)
    ]
    for pattern in patterns:
        match = re.search(pattern, text, re.DOTALL)
        if match:
            extracted = match.group(1).strip()
            print("[DEBUG] Successfully extracted code block.")
            return extracted
    # No fence found: accept the whole text if it already looks like HTML.
    if text.strip().startswith(('<!DOCTYPE html>', '<html')):
        print("[DEBUG] Text appears to be raw HTML, using as is.")
        return text.strip()
    print("[DEBUG] No code block found in text. Returning original text (may not be valid HTML).")
    return text.strip()
def send_to_sandbox(code):
    """
    Wrap generated HTML in a sandboxed iframe served via a base64 data URI.

    The wrapper page replaces window.localStorage with an in-memory shim and
    swallows runtime errors via window.onerror, so generated code cannot
    persist data or pop raw error dialogs.

    Args:
        code: HTML fragment (or full document content) to render in the iframe.

    Returns:
        An <iframe> HTML string whose src is a data: URI of the wrapped page.
    """
    # NOTE: `code` is interpolated verbatim into the body; the doubled
    # braces below are f-string escapes producing literal { } in the JS/CSS.
    wrapped_code = f"""
<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <script>
        // Create a safe storage alternative
        const safeStorage = {{
            _data: {{}},
            getItem: function(key) {{
                return this._data[key] || null;
            }},
            setItem: function(key, value) {{
                this._data[key] = value;
            }},
            removeItem: function(key) {{
                delete this._data[key];
            }},
            clear: function() {{
                this._data = {{}};
            }}
        }};
        // Replace native localStorage
        Object.defineProperty(window, 'localStorage', {{
            value: safeStorage,
            writable: false
        }});
        // Add error handling without using alert
        window.onerror = function(message, source, lineno, colno, error) {{
            console.error('Error in sandbox:', message);
        }};
    </script>
    <style>
        /* Basic default body styling for generated code */
        body {{ margin: 0; padding: 10px; font-family: sans-serif; }}
    </style>
</head>
<body>
    {code}
</body>
</html>
"""
    # Base64-encode the whole document so it fits in a single data: URI.
    encoded_html = base64.b64encode(wrapped_code.encode('utf-8')).decode('utf-8')
    data_uri = f"data:text/html;charset=utf-8;base64,{encoded_html}"
    # sandbox attributes allow scripts/forms/popups while keeping the page
    # isolated from the host app's origin-level APIs.
    iframe_html = f'<iframe src="{data_uri}" width="100%" height="920px" sandbox="allow-scripts allow-same-origin allow-forms allow-popups allow-modals allow-presentation" allow="display-capture"></iframe>'
    print("[DEBUG] Generated iframe for sandbox.")
    return iframe_html
def select_example(example_state):
    """Copy an example card's description into the input textbox.

    `example_state` is expected to be a dict carrying a 'description' key;
    a missing key falls back to an empty string.
    """
    description = example_state.get("description", "")
    return gr.update(value=description)
| # --- Your existing save_env_vars_to_file (from your original code) --- | |
def save_env_vars_to_file(env_vars):
    """Persist user-supplied configuration as KEY=value lines in CONFIG_FILE.

    NOTE(review): values (including API keys) are written in plaintext.
    """
    print("[DEBUG] Saving user config to file")
    lines = [f"{key}={value}\n" for key, value in env_vars.items()]
    with open(CONFIG_FILE, "w") as f:
        f.writelines(lines)
| # --- CSS from MiniMaxAI template --- | |
| CUSTOM_CSS = """ | |
| /* Add styles for the main container */ | |
| .ant-tabs-content { | |
| height: calc(100vh - 200px); | |
| overflow: hidden; | |
| } | |
| .ant-tabs-tabpane { | |
| height: 100%; | |
| overflow-y: auto; | |
| } | |
| /* Modify existing styles */ | |
| .output-empty,.output-loading { | |
| display: flex; | |
| flex-direction: column; | |
| align-items: center; | |
| justify-content: center; | |
| width: 100%; | |
| min-height: 680px; | |
| position: relative; | |
| } | |
| .output-html { | |
| display: flex; | |
| flex-direction: column; | |
| width: 100%; | |
| min-height: 680px; | |
| } | |
| .output-html > iframe { | |
| flex: 1; | |
| } | |
| .right_content { | |
| display: flex; | |
| flex-direction: column; | |
| align-items: center; | |
| justify-content: center; | |
| width: 100%; | |
| height: 100%; | |
| min-height: unset; | |
| background: #fff; | |
| border-radius: 8px; | |
| box-shadow: 0 2px 8px rgba(0,0,0,0.1); | |
| } | |
| /* Add styles for the code playground container */ | |
| .code-playground-container { | |
| height: 100%; | |
| overflow-y: auto; | |
| padding-right: 8px; | |
| } | |
| .code-playground-container::-webkit-scrollbar { | |
| width: 6px; | |
| } | |
| .code-playground-container::-webkit-scrollbar-track { | |
| background: #f1f1f1; | |
| border-radius: 3px; | |
| } | |
| .code-playground-container::-webkit-scrollbar-thumb { | |
| background: #888; | |
| border-radius: 3px; | |
| } | |
| .code-playground-container::-webkit-scrollbar-thumb:hover { | |
| background: #555; | |
| } | |
| .render_header { | |
| display: flex; | |
| align-items: center; | |
| padding: 8px 16px; | |
| background: #f5f5f5; | |
| border-bottom: 1px solid #e8e8e8; | |
| border-top-left-radius: 8px; | |
| border-top-right-radius: 8px; | |
| } | |
| .header_btn { | |
| width: 12px; | |
| height: 12px; | |
| border-radius: 50%; | |
| margin-right: 8px; | |
| display: inline-block; | |
| } | |
| .header_btn:nth-child(1) { | |
| background: #ff5f56; | |
| } | |
| .header_btn:nth-child(2) { | |
| background: #ffbd2e; | |
| } | |
| .header_btn:nth-child(3) { | |
| background: #27c93f; | |
| } | |
| .output-html > iframe { | |
| flex: 1; | |
| border: none; | |
| background: #fff; | |
| } | |
| .reasoning-box { | |
| max-height: 300px; | |
| overflow-y: auto; | |
| border-radius: 4px; | |
| font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif; | |
| font-size: 14px; | |
| line-height: 1.6; | |
| width: 100%; | |
| scroll-behavior: smooth; | |
| display: flex; | |
| flex-direction: column-reverse; | |
| } | |
| .reasoning-box .ms-markdown { /* Targeting markdown within the box for modelscope */ | |
| padding: 0 12px; | |
| } | |
| .reasoning-box::-webkit-scrollbar { | |
| width: 6px; | |
| } | |
| .reasoning-box::-webkit-scrollbar-track { | |
| background: #f1f1f1; | |
| border-radius: 3px; | |
| } | |
| .reasoning-box::-webkit-scrollbar-thumb { | |
| background: #888; | |
| border-radius: 3px; | |
| } | |
| .reasoning-box::-webkit-scrollbar-thumb:hover { | |
| background: #555; | |
| } | |
| .markdown-container { | |
| max-height: 300px; | |
| overflow-y: auto; | |
| border-radius: 4px; | |
| font-family: -apple-system, BlinkMacMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif; | |
| font-size: 14px; | |
| line-height: 1.6; | |
| width: 100%; | |
| scroll-behavior: smooth; | |
| display: flex; | |
| flex-direction: column-reverse; | |
| } | |
| /* Example card styles */ | |
| .example-card { | |
| flex: 1 1 calc(50% - 20px); | |
| max-width: calc(50% - 20px); | |
| margin: 6px; | |
| transition: all 0.3s; | |
| cursor: pointer; | |
| border: 1px solid #e8e8e8; | |
| border-radius: 8px; | |
| box-shadow: 0 2px 8px rgba(0,0,0,0.05); | |
| } | |
| .example-card:hover { | |
| transform: translateY(-4px); | |
| box-shadow: 0 4px 12px rgba(0,0,0,0.1); | |
| border-color: #d9d9d9; | |
| } | |
| .example-card .ant-card-meta-title { | |
| font-size: 16px; | |
| font-weight: 500; | |
| margin-bottom: 8px; | |
| color: #262626; | |
| } | |
| .example-card .ant-card-meta-description { | |
| color: #666; | |
| font-size: 14px; | |
| line-height: 1.5; | |
| } | |
| /* Example tabs styles */ | |
| .example-tabs .ant-tabs-nav { | |
| margin-bottom: 16px; | |
| } | |
| .example-tabs .ant-tabs-tab { | |
| padding: 8px 16px; | |
| font-size: 15px; | |
| } | |
| .example-tabs .ant-tabs-tab-active { | |
| font-weight: 500; | |
| } | |
| /* Empty state styles */ | |
| /* Corrected to match the target's `.right_content` for empty state */ | |
| .right_content .output-empty { | |
| display: flex; | |
| flex-direction: column; | |
| align-items: center; | |
| justify-content: center; | |
| width: 100%; | |
| min-height: 620px; /* Adjusted to match original */ | |
| background: #fff; | |
| border-radius: 8px; | |
| box-shadow: 0 2px 8px rgba(0,0,0,0.1); | |
| } | |
| /* Add styles for the example cards container */ | |
| .example-tabs .ant-tabs-content { | |
| padding: 0 8px; | |
| } | |
| .example-tabs .ant-flex { | |
| margin: 0 -8px; | |
| width: calc(100% + 16px); | |
| } | |
| """ | |
| # --- Main Gradio Interface Launch Function --- | |
| def launch_interface(): | |
| # --- Chatbot Tab Logic (Your existing logic, using gr.gr components) --- | |
| def setup_agent_streaming(question, model_id, hf_token, openai_api_key, serpapi_key, api_endpoint, use_custom_endpoint, | |
| custom_api_endpoint, custom_api_key, search_provider, search_api_key, custom_search_url): | |
| print("[DEBUG] Setting up agent with input question:", question) | |
| if question.strip() == "": | |
| yield "<p style='color:#ef4444;'>Please enter a question.</p>", "" # Use HTML for error | |
| return | |
| endpoint = custom_api_endpoint if use_custom_endpoint else api_endpoint | |
| api_key = custom_api_key if use_custom_endpoint else openai_api_key | |
| save_env_vars_to_file({ | |
| "HF_TOKEN": hf_token, | |
| "SERPAPI_API_KEY": serpapi_key, | |
| "API_ENDPOINT": api_endpoint, | |
| "OPENAI_API_KEY": openai_api_key | |
| }) | |
| print("[DEBUG] Instantiating agent with UI configuration") | |
| agent = create_agent( | |
| model_id=model_id, | |
| hf_token=hf_token, | |
| serpapi_key=serpapi_key, | |
| openai_api_key=openai_api_key, | |
| api_endpoint=api_endpoint, | |
| custom_api_endpoint=endpoint, | |
| custom_api_key=api_key, | |
| search_provider=search_provider, | |
| search_api_key=search_api_key, | |
| custom_search_url=custom_search_url | |
| ) | |
| output_html_buffer = [] # Store HTML snippets | |
| final_answer_text = "" | |
| is_complete = False | |
| def highlight_text(text): | |
| # This function will now produce self-contained HTML snippets | |
| if "[COMPLETED] " in text: | |
| nonlocal final_answer_text | |
| final_answer_text = text.split(":", 1)[1].strip() | |
| return f"<p><span style='color:#10b981;font-weight:bold;'>[FINAL]</span> <mark>{final_answer_text}</mark></p>" | |
| elif "[ERROR]" in text: | |
| return f"<p><span style='color:#ef4444;font-weight:bold;'>[ERROR]</span> <pre>{text.strip()}</pre></p>" | |
| elif "[STARTING]" in text: | |
| return f"<p><span style='color:#f59e0b;font-weight:bold;'>[STEP]</span> {text.strip()}</p>" | |
| elif text.strip(): | |
| # Wrap regular steps in details tag for collapsing | |
| return f"<details><summary><span style='color:#f59e0b;'>Step</span></summary>\n<pre>{text.strip()}</pre>\n</details>" | |
| return "" | |
| def stream_callback(text): | |
| # The callback now appends the *formatted* HTML directly | |
| formatted_html = highlight_text(text) | |
| if formatted_html: | |
| output_html_buffer.append(formatted_html) | |
| def run_agent_async(): | |
| nonlocal is_complete | |
| try: | |
| run_agent_with_streaming(agent, question, stream_callback) | |
| except Exception as e: | |
| output_html_buffer.append(highlight_text(f"[ERROR] {str(e)}")) | |
| finally: | |
| is_complete = True | |
| agent_thread = threading.Thread(target=run_agent_async) | |
| agent_thread.start() | |
| last_buffer_length = 0 | |
| # Yield initial message | |
| yield "<p><i>Agent started...</i></p>", "" # Initial message for immediate feedback | |
| while not is_complete or agent_thread.is_alive(): | |
| # Check if new content has been added to the buffer | |
| if len(output_html_buffer) > last_buffer_length: | |
| # Join only the new content, or the entire buffer for cumulative display | |
| current_html_output = "".join(output_html_buffer) | |
| yield current_html_output, final_answer_text | |
| last_buffer_length = len(current_html_output) | |
| time.sleep(0.05) # Smaller delay for more responsive updates | |
| # Ensure final state is yielded | |
| final_html_output = "".join(output_html_buffer) | |
| yield final_html_output, final_answer_text | |
| # --- Code Playground Tab Logic (Using modelscope_studio components) --- | |
| def generate_code_streaming(query, model_id, hf_token, openai_api_key, serpapi_key, api_endpoint, use_custom_endpoint, | |
| custom_api_endpoint, custom_api_key): | |
| print(f"[DEBUG] Starting code generation with query: {query}") | |
| if query.strip() == "": | |
| # Reset outputs and show empty state | |
| # Yield for reasoning_output (Markdown), code_output_raw (Code), sandbox_output (HTML) | |
| # code_output_tabs_container (antd.Tabs, for active_key and visibility) | |
| # loading_state_group (gr.Group, for visibility) | |
| # loading_tip (gr.State, for value and visibility) | |
| yield gr.update(value=""), gr.update(value=""), gr.update(value=""), \ | |
| gr.update(selected="empty", visible=False), gr.update(visible=True), \ | |
| gr.update(value="Enter your request to generate code", visible=False) | |
| return | |
| endpoint = custom_api_endpoint if use_custom_endpoint else api_endpoint | |
| api_key = custom_api_key if use_custom_endpoint else openai_api_key | |
| agent = create_agent( | |
| model_id=model_id, | |
| hf_token=hf_token, | |
| openai_api_key=openai_api_key, | |
| serpapi_key=serpapi_key, # May not be needed for pure code gen, but kept for consistency | |
| api_endpoint=api_endpoint, | |
| custom_api_endpoint=endpoint, | |
| custom_api_key=api_key, | |
| search_provider="none", # Explicitly set to none if not used | |
| search_api_key=None, | |
| custom_search_url=None | |
| ) | |
| # Corrected: Set the system prompt using prompt_templates as per the error message. | |
| if hasattr(agent, 'prompt_templates'): | |
| if "system_prompt" in agent.prompt_templates: | |
| agent.prompt_templates["system_prompt"] = SYSTEM_PROMPT_CODE_GEN | |
| print("[DEBUG] Set agent.prompt_templates['system_prompt'] for code generation.") | |
| elif 'user_agent' in agent.prompt_templates and 'system_message' in agent.prompt_templates['user_agent']: | |
| agent.prompt_templates['user_agent']['system_message'] = SYSTEM_PROMPT_CODE_GEN | |
| print("[DEBUG] Set agent.prompt_templates['user_agent']['system_message'] for code generation.") | |
| else: | |
| print("[WARNING] Could not set system prompt for CodeAgent using known patterns. " | |
| "Agent might not follow code generation instructions optimally.") | |
| # Fallback: Prepend to the question if no proper system prompt mechanism | |
| query = SYSTEM_PROMPT_CODE_GEN + "\n\n" + query | |
| else: | |
| print("[WARNING] Agent has no 'prompt_templates' attribute. Cannot set system prompt.") | |
| query = SYSTEM_PROMPT_CODE_GEN + "\n\n" + query | |
| reasoning_text_buffer = [] # Buffer for the raw text of reasoning/code combined | |
| final_generated_code_content = "" # Store the final extracted code | |
| is_agent_run_complete = False # Flag for the async agent run completion | |
| # Callback for the run_agent_with_streaming | |
| def code_gen_stream_callback(text_chunk): | |
| nonlocal reasoning_text_buffer | |
| reasoning_text_buffer.append(text_chunk) | |
| # Function to run the agent asynchronously | |
| def run_agent_async_for_codegen(): | |
| nonlocal is_agent_run_complete, final_generated_code_content | |
| try: | |
| # The run_agent_with_streaming returns the final answer | |
| final_answer_from_agent = run_agent_with_streaming(agent, query, code_gen_stream_callback) | |
| # Ensure the final answer from agent.run is captured | |
| final_generated_code_content = final_answer_from_agent | |
| except Exception as e: | |
| reasoning_text_buffer.append(f"[ERROR] {str(e)}\n") | |
| finally: | |
| is_agent_run_complete = True | |
| # Start agent in background thread | |
| agent_thread = threading.Thread(target=run_agent_async_for_codegen) | |
| agent_thread.start() | |
| # --- Initial yield to show loading state --- | |
| # Hide empty, show loading, show reasoning tab initially | |
| yield gr.update(value="", visible=True), gr.update(value="", visible=False), gr.update(value="", visible=False), \ | |
| gr.update(selected="reasoning", visible=True), gr.update(visible=True), \ | |
| gr.update(value="Thinking and coding...", visible=True) | |
| # --- Streaming loop for Gradio UI --- | |
| last_buffer_len = 0 | |
| while not is_agent_run_complete or agent_thread.is_alive() or len(reasoning_text_buffer) > last_buffer_len: | |
| current_full_output = "".join(reasoning_text_buffer) | |
| if len(current_full_output) > last_buffer_len: | |
| # Update reasoning output with accumulated text | |
| yield gr.update(value=current_full_output, visible=True), \ | |
| gr.update(value="", visible=False), \ | |
| gr.update(value="", visible=False), \ | |
| gr.update(selected="reasoning"), \ | |
| gr.update(visible=False), \ | |
| gr.update(value="Generating code...", visible=True) # Update loading status | |
| last_buffer_len = len(current_full_output) | |
| time.sleep(0.05) # Small delay for UI updates | |
| # After the agent run completes and all buffered text is processed: | |
| # Use the actual final answer from the agent's run method if available, otherwise buffer. | |
| # This is important if the final_answer_from_agent is more concise than the full buffer. | |
| final_output_for_parsing = final_generated_code_content if final_generated_code_content else "".join(reasoning_text_buffer) | |
| generated_code_extracted = remove_code_block(final_output_for_parsing) | |
| # Try to refine reasoning if code was extracted | |
| reasoning_only_display = final_output_for_parsing | |
| if generated_code_extracted: | |
| # Simple heuristic to remove code block from reasoning for display | |
| reasoning_only_display = reasoning_only_display.replace(f"```{generated_code_extracted}```", "").strip() | |
| reasoning_only_display = reasoning_only_display.replace(f"```html\n{generated_code_extracted}\n```", "").strip() | |
| reasoning_only_display = reasoning_only_display.replace(f"```HTML\n{generated_code_extracted}\n```", "").strip() | |
| html_to_render = send_to_sandbox(generated_code_extracted) if generated_code_extracted else "<div>No valid HTML code was generated or extracted.</div>" | |
| # Final yield to show the code and rendered output | |
| yield gr.update(value=reasoning_only_display, visible=True), \ | |
| gr.update(value=generated_code_extracted, visible=True), \ | |
| gr.update(value=html_to_render, visible=True), \ | |
| gr.update(selected="render", visible=True), \ | |
| gr.update(visible=True), \ | |
| gr.update(value="Done", visible=False) # Hide loading status | |
    # --- Gradio UI Layout (Combining your original with MiniMaxAI template) ---
    # Use gr.Blocks, ms.Application, antdx.XProvider, ms.AutoLoading for modelscope theming
    with gr.Blocks(css=CUSTOM_CSS) as demo, ms.Application(), antdx.XProvider(), ms.AutoLoading():
        gr.Markdown("# SmolAgent - Intelligent AI with Web Tools")
        with gr.Tabs() as main_tabs:  # Main tabs for Chatbot and Code Playground
            with gr.TabItem("Chatbot"):
                # Chatbot tab built from plain Gradio components.
                with gr.Row():
                    with gr.Column(scale=1):
                        # Left column: question input plus all configuration controls.
                        question = gr.Textbox(label="Your Question", lines=3, placeholder="Enter your question or task for the AI agent...")
                        model_id_chatbot = gr.Textbox(label="Model ID", value="gpt-4o-mini", placeholder="e.g., gpt-4, claude-3-opus-20240229")
                        with gr.Accordion("API Configuration", open=False):
                            hf_token_chatbot = gr.Textbox(label="Hugging Face Token (Optional)", type="password", value=os.getenv("HF_TOKEN", ""), placeholder="Your Hugging Face token if using HF models")
                            openai_api_key_chatbot = gr.Textbox(label="OpenAI API Key (Optional)", type="password", value=os.getenv("OPENAI_API_KEY", ""), placeholder="Your OpenAI API key")
                            api_endpoint_chatbot = gr.Textbox(label="Default API Endpoint", value=os.getenv("API_ENDPOINT", "https://api.openai.com/v1"), placeholder="e.g., https://api.openai.com/v1")
                            with gr.Group():
                                use_custom_endpoint_chatbot = gr.Checkbox(label="Use Custom API Endpoint")
                                custom_api_endpoint_chatbot = gr.Textbox(label="Custom API URL", visible=False, placeholder="URL for your custom API endpoint")
                                custom_api_key_chatbot = gr.Textbox(label="Custom API Key (Optional)", type="password", visible=False, placeholder="API key for the custom endpoint")
                        with gr.Accordion("Search Configuration", open=False):
                            serpapi_key_chatbot = gr.Textbox(label="SerpAPI Key (Optional)", type="password", value=os.getenv("SERPAPI_API_KEY", ""), placeholder="Your SerpAPI key for web searches")
                            search_provider_chatbot = gr.Dropdown(choices=["serper", "searxng"], value="searxng", label="Search Provider")
                            search_api_key_chatbot = gr.Textbox(label="Serper API Key", type="password", visible=False, placeholder="API key for Serper.dev if selected")
                            custom_search_url_chatbot = gr.Textbox(label="Custom SearxNG URL", value="https://search.endorisk.nl/search", visible=True, placeholder="URL for your SearxNG instance")
                        submit_btn_chatbot = gr.Button("Run Agent", variant="primary")
                    with gr.Column(scale=2):
                        # Right column: streamed agent output plus final answer.
                        output_chatbot = gr.HTML(label="Live Agent Output")
                        final_chatbot = gr.Textbox(label="Final Answer", interactive=False)
                        copy_btn_chatbot = gr.Button("Copy Final Answer")

                def update_visibility_chatbot(provider):
                    # Toggle provider-specific fields (SearxNG URL vs Serper key).
                    is_searxng = (provider == "searxng")
                    is_serper = (provider == "serper")
                    return {
                        custom_search_url_chatbot: gr.update(visible=is_searxng),
                        search_api_key_chatbot: gr.update(visible=is_serper)
                    }

                def update_custom_fields_chatbot(checked):
                    # Reveal the custom endpoint URL/key only when the checkbox is on.
                    return {
                        custom_api_endpoint_chatbot: gr.update(visible=checked),
                        custom_api_key_chatbot: gr.update(visible=checked)
                    }

                search_provider_chatbot.change(fn=update_visibility_chatbot, inputs=search_provider_chatbot, outputs=[custom_search_url_chatbot, search_api_key_chatbot])
                use_custom_endpoint_chatbot.change(fn=update_custom_fields_chatbot, inputs=use_custom_endpoint_chatbot, outputs=[custom_api_endpoint_chatbot, custom_api_key_chatbot])
                submit_btn_chatbot.click(
                    fn=setup_agent_streaming,
                    inputs=[question, model_id_chatbot, hf_token_chatbot, openai_api_key_chatbot, serpapi_key_chatbot, api_endpoint_chatbot, use_custom_endpoint_chatbot, custom_api_endpoint_chatbot, custom_api_key_chatbot, search_provider_chatbot, search_api_key_chatbot, custom_search_url_chatbot],
                    outputs=[output_chatbot, final_chatbot],
                    show_progress=True
                )
                # Client-side clipboard copy; no Python round-trip.
                copy_btn_chatbot.click(
                    fn=None,
                    inputs=final_chatbot,
                    outputs=None,
                    js="(text) => { if (text) { navigator.clipboard.writeText(text); return 'Copied!'; } return ''; }"
                )
            with gr.TabItem("Code Playground (WebDev)"):
                # This section uses modelscope_studio.components.antd/antdx/ms
                with antd.Row(gutter=[32, 12], elem_classes="code-playground-container"):
                    with antd.Col(span=24, md=12):
                        with antd.Flex(vertical=True, gap="middle"):
                            code_query = antd.Input.Textarea(
                                size="large",
                                allow_clear=True,
                                auto_size=dict(minRows=2, maxRows=6),
                                placeholder="Please enter what kind of application you want or choose an example below and click the button"
                            )
                            generate_code_btn = antd.Button("Generate Code", type="primary", size="large")
                            # Output tabs for Reasoning and Generated Code
                            with antd.Tabs(active_key="reasoning", visible=False) as output_tabs_code_gen:  # Matches target's output_tabs
                                with antd.Tabs.Item(key="reasoning", label="🤔 Thinking Process"):
                                    reasoning_output = ms.Markdown(elem_classes="reasoning-box")  # Use ms.Markdown
                                with antd.Tabs.Item(key="code", label="💻 Generated Code"):
                                    # gr.Code for code display (modelscope has no direct equivalent)
                                    code_output_raw = gr.Code(label="Generated Code", language="html", interactive=False, lines=20)
                            antd.Divider("Examples")
                            # Example prompt cards, grouped by category.
                            with antd.Tabs(elem_classes="example-tabs") as example_tabs:
                                for category, examples_list in EXAMPLES.items():  # 'examples_list' avoids shadowing
                                    with antd.Tabs.Item(key=category, label=category):
                                        with antd.Flex(gap="small", wrap=True):
                                            for example in examples_list:
                                                with antd.Card(
                                                    elem_classes="example-card",
                                                    hoverable=True
                                                ) as example_card:
                                                    antd.Card.Meta(
                                                        title=example['title'],
                                                        description=example['description'])
                                                # gr.State carries this card's example dict into select_example.
                                                example_card.click(
                                                    fn=select_example,
                                                    inputs=[gr.State(example)],
                                                    outputs=[code_query]
                                                )
                    with antd.Col(span=24, md=12):
                        # Right column: output display (empty / loading / rendered HTML).
                        with antd.Card(title="Output", elem_style=dict(height="100%"), styles=dict(body=dict(height="100%")), elem_id="output-container"):
                            # Hidden-tab-bar Tabs acts as a tri-state panel switcher.
                            with antd.Tabs(active_key="empty", render_tab_bar="() => null") as state_tab:  # Matches target's state_tab
                                with antd.Tabs.Item(key="empty"):
                                    empty = antd.Empty(
                                        description="Enter your request to generate code",
                                        elem_classes="output-empty"  # Matches target's CSS class
                                    )
                                with antd.Tabs.Item(key="loading"):
                                    # The Spin component from antd
                                    with antd.Spin(True, tip="Thinking and coding...", size="large", elem_classes="output-loading") as loading_spinner:  # Matches target's loading
                                        ms.Div()  # Placeholder for content inside spin
                                with antd.Tabs.Item(key="render"):
                                    sandbox_output = gr.HTML(elem_classes="output-html")  # Matches target's sandbox
                # --- Interactions for Code Playground ---
                # NOTE(review): loading_tip is a gr.State, yet handlers send it
                # gr.update(value=..., visible=...); gr.State has no visibility —
                # confirm whether this should be a visible component instead.
                loading_tip = gr.State("Ready")
                # Initial setup when code_query is submitted or button clicked
                generate_code_btn.click(
                    fn=lambda: (
                        gr.update(selected="loading"),  # Switch to loading tab in the right panel
                        gr.update(visible=False),  # Hide the empty state component
                        gr.update(visible=True),  # Show the loading state component
                        gr.update(value="Thinking and coding...", visible=True),  # Update loading tip text
                        gr.update(value="", visible=True),  # Clear reasoning output, make it visible
                        gr.update(value="", visible=False),  # Clear raw code output, hide it
                        gr.update(value="", visible=False)  # Clear sandbox output, hide it
                    ),
                    # NOTE(review): `empty_state_group` is never defined in this
                    # file — the Empty component above is bound to `empty` — so
                    # this raises NameError when the UI is built. Confirm and
                    # rename to `empty`.
                    outputs=[state_tab, empty_state_group, loading_spinner, loading_tip, reasoning_output, code_output_raw, sandbox_output],
                    queue=False  # This pre-processing step should not be queued
                ).then(
                    fn=generate_code_streaming,
                    # NOTE(review): model_id_code, hf_token_code, openai_api_key_code,
                    # api_endpoint_code, use_custom_endpoint_code,
                    # custom_api_endpoint_code and custom_api_key_code are never
                    # created anywhere in this file — NameError at build time.
                    # They presumably should be the *_chatbot components or new
                    # playground-specific inputs; confirm intent.
                    inputs=[
                        code_query, model_id_code, hf_token_code, openai_api_key_code, serpapi_key_chatbot,  # Re-using chatbot's serpapi
                        api_endpoint_code, use_custom_endpoint_code, custom_api_endpoint_code, custom_api_key_code
                    ],
                    outputs=[reasoning_output, code_output_raw, sandbox_output, state_tab, output_tabs_code_gen, loading_tip],
                    show_progress="hidden"  # Manage progress via loading_tip and state_tab
                ).then(
                    fn=lambda: (gr.update(visible=False)),  # Hide the loading spinner after the process completes
                    outputs=[loading_spinner]
                )
                # Auto-scroll functionality from MiniMaxAI template
                # Scroll the reasoning box to the bottom whenever its content changes.
                reasoning_output.change(
                    fn=None,
                    inputs=[],
                    outputs=[],
                    js="""
                    function() {
                        setTimeout(() => {
                            const reasoningBox = document.querySelector('.reasoning-box');
                            if (reasoningBox) {
                                reasoningBox.scrollTop = reasoningBox.scrollHeight;
                            }
                        }, 100);
                    }
                    """
                )
                code_output_raw.change(  # This is gr.Code, might need different selector
                    fn=None,
                    inputs=[],
                    outputs=[],
                    js="""
                    function() {
                        setTimeout(() => {
                            // Gradio's gr.Code output is often within a <textarea> or <pre> inside a div
                            const codeBox = document.querySelector('.markdown-container pre, .markdown-container textarea');
                            if (codeBox) {
                                codeBox.scrollTop = codeBox.scrollHeight;
                            }
                        }, 100);
                    }
                    """
                )

                # Handling tab changes to ensure correct visibility as in MiniMaxAI
                def on_output_tabs_change(tab_key):
                    # Currently unused: the streaming yields set `selected`
                    # directly; kept for programmatic tab changes if needed.
                    return gr.update(active_key=tab_key)
                # The original MiniMaxAI app had a `output_tabs.change` event.
                # In our setup, `output_tabs_code_gen` (the Reasoning/Code tabs)
                # visibility and selected tab are controlled directly by the `generate_code_streaming`
                # function's yields. `state_tab` (empty/loading/render) is the main outer control.
                # If you need specific behavior when a user manually switches 'Thinking Process' vs 'Generated Code'
                # after the process starts, you'd enable this.
                # output_tabs_code_gen.change(fn=on_output_tabs_change, inputs=output_tabs_code_gen, outputs=[output_tabs_code_gen])

    print("[DEBUG] Launching updated Gradio interface")
    demo.queue(default_concurrency_limit=50).launch(ssr_mode=False)  # Keep queue and ssr_mode if relevant to your setup


if __name__ == "__main__":
    launch_interface()