Spaces:
Running
Running
feat: add functions to detect available API keys and set default provider
Browse files
app.py
CHANGED
@@ -394,6 +394,39 @@ def get_default_model_for_provider(provider: str) -> str:
|
|
394 |
)
|
395 |
|
396 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
397 |
def save_api_key(provider, api_key):
|
398 |
"""Save API key to environment variable and update model accordingly."""
|
399 |
if not api_key.strip():
|
@@ -428,8 +461,11 @@ def save_api_key(provider, api_key):
|
|
428 |
return f"β Unknown provider: {provider}"
|
429 |
|
430 |
|
431 |
-
def get_api_key_status(selected_llm_provider=
|
432 |
-
"""Get the status of
|
|
|
|
|
|
|
433 |
env_vars = {
|
434 |
"Hugging Face": "HUGGINGFACE_API_KEY",
|
435 |
"Anthropic": "ANTHROPIC_API_KEY",
|
@@ -439,31 +475,37 @@ def get_api_key_status(selected_llm_provider="Anthropic"):
|
|
439 |
}
|
440 |
|
441 |
status = []
|
442 |
-
|
443 |
-
|
444 |
-
|
445 |
-
|
446 |
-
|
447 |
-
|
448 |
-
|
449 |
-
|
450 |
-
|
451 |
-
|
452 |
-
|
453 |
-
|
454 |
-
|
455 |
-
|
456 |
-
|
457 |
-
|
458 |
-
|
459 |
-
|
460 |
-
|
461 |
-
|
462 |
|
463 |
# Show current active model
|
464 |
current_model = os.getenv("MODEL_ID", "Qwen/Qwen2.5-Coder-32B-Instruct")
|
465 |
status.append(f"π€ Current Active Model: {current_model}")
|
466 |
|
|
|
|
|
|
|
|
|
|
|
|
|
467 |
return "\n".join(status)
|
468 |
|
469 |
|
@@ -645,39 +687,28 @@ class GradioUI:
|
|
645 |
label="Current API Key Status",
|
646 |
value=get_api_key_status(),
|
647 |
interactive=False,
|
648 |
-
lines=
|
649 |
-
max_lines=
|
650 |
)
|
651 |
|
652 |
-
# with gr.Row():
|
653 |
-
# refresh_status_btn = gr.Button(
|
654 |
-
# "π Refresh Status", size="sm"
|
655 |
-
# )
|
656 |
-
|
657 |
-
gr.Markdown("---")
|
658 |
-
|
659 |
-
# Hugging Face Token
|
660 |
-
with gr.Row():
|
661 |
-
hf_token = gr.Textbox(
|
662 |
-
label="Hugging Face Token",
|
663 |
-
placeholder="hf_...",
|
664 |
-
type="password",
|
665 |
-
scale=4,
|
666 |
-
)
|
667 |
-
hf_save_btn = gr.Button("Save", size="sm", scale=1)
|
668 |
-
|
669 |
gr.Markdown("---")
|
670 |
|
671 |
-
# LLM Token with Provider Selection
|
672 |
with gr.Row():
|
673 |
llm_provider = gr.Dropdown(
|
674 |
label="LLM Provider",
|
675 |
-
choices=[
|
676 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
677 |
scale=1,
|
678 |
)
|
679 |
llm_token = gr.Textbox(
|
680 |
-
label="
|
681 |
placeholder="Enter your API key...",
|
682 |
type="password",
|
683 |
scale=3,
|
@@ -695,13 +726,11 @@ class GradioUI:
|
|
695 |
file_uploads_log = gr.State([])
|
696 |
|
697 |
# Set up event handlers for API key saving
|
698 |
-
def save_and_update_status(
|
699 |
-
provider, api_key, selected_llm_provider="Anthropic", session_state=None
|
700 |
-
):
|
701 |
message = save_api_key(provider, api_key)
|
702 |
-
status = get_api_key_status(
|
703 |
|
704 |
-
#
|
705 |
if provider != "Hugging Face" and session_state is not None:
|
706 |
agent_message = self.recreate_agent_with_new_model(
|
707 |
session_state, provider
|
@@ -711,28 +740,14 @@ class GradioUI:
|
|
711 |
|
712 |
return message, status, "" # Clear the input field
|
713 |
|
714 |
-
hf_save_btn.click(
|
715 |
-
lambda key, llm_prov, sess_state: save_and_update_status(
|
716 |
-
"Hugging Face", key, llm_prov, sess_state
|
717 |
-
),
|
718 |
-
inputs=[hf_token, llm_provider, session_state],
|
719 |
-
outputs=[api_message, api_status, hf_token],
|
720 |
-
).then(lambda: gr.Textbox(visible=True), outputs=[api_message])
|
721 |
-
|
722 |
llm_save_btn.click(
|
723 |
lambda provider, key, sess_state: save_and_update_status(
|
724 |
-
provider, key,
|
725 |
),
|
726 |
inputs=[llm_provider, llm_token, session_state],
|
727 |
outputs=[api_message, api_status, llm_token],
|
728 |
).then(lambda: gr.Textbox(visible=True), outputs=[api_message])
|
729 |
|
730 |
-
# refresh_status_btn.click(
|
731 |
-
# fn=lambda llm_prov: get_api_key_status(llm_prov),
|
732 |
-
# inputs=[llm_provider],
|
733 |
-
# outputs=[api_status],
|
734 |
-
# )
|
735 |
-
|
736 |
# Update status when LLM provider dropdown changes
|
737 |
llm_provider.change(
|
738 |
fn=get_api_key_status, inputs=[llm_provider], outputs=[api_status]
|
|
|
394 |
)
|
395 |
|
396 |
|
397 |
+
def get_available_providers():
    """Detect which LLM providers have an API key set in the environment.

    Checks a fixed provider-name -> environment-variable map and collects
    every provider whose variable holds a non-empty value.

    Returns:
        list[str]: Provider display names, in the map's declaration order
        (Anthropic, OpenAI, Hugging Face, SambaNova, Mistral).
    """
    env_vars = {
        "Anthropic": "ANTHROPIC_API_KEY",
        "OpenAI": "OPENAI_API_KEY",
        "Hugging Face": "HUGGINGFACE_API_KEY",
        "SambaNova": "SAMBANOVA_API_KEY",
        "Mistral": "MISTRAL_API_KEY",
    }
    # os.getenv returns None when unset; an empty string is also falsy,
    # so blank keys are treated the same as missing ones.
    return [
        provider
        for provider, env_var in env_vars.items()
        if os.getenv(env_var)
    ]
|
413 |
+
|
414 |
+
|
415 |
+
def get_default_provider():
    """Get the default provider based on available API keys.

    Walks a fixed priority list and returns the first provider whose API
    key is configured in the environment. When no keys are present at
    all, falls back to "Anthropic" as the UI's default selection.
    """
    # Set lookup keeps each membership test O(1); behavior is unchanged.
    configured = set(get_available_providers())

    # Preferred order when several keys are configured simultaneously.
    for candidate in ("Anthropic", "OpenAI", "Mistral", "SambaNova", "Hugging Face"):
        if candidate in configured:
            return candidate

    # No keys detected anywhere: default to Anthropic.
    return "Anthropic"
|
428 |
+
|
429 |
+
|
430 |
def save_api_key(provider, api_key):
|
431 |
"""Save API key to environment variable and update model accordingly."""
|
432 |
if not api_key.strip():
|
|
|
461 |
return f"β Unknown provider: {provider}"
|
462 |
|
463 |
|
464 |
+
def get_api_key_status(selected_llm_provider=None):
|
465 |
+
"""Get the status of all API keys, highlighting the selected provider."""
|
466 |
+
if selected_llm_provider is None:
|
467 |
+
selected_llm_provider = get_default_provider()
|
468 |
+
|
469 |
env_vars = {
|
470 |
"Hugging Face": "HUGGINGFACE_API_KEY",
|
471 |
"Anthropic": "ANTHROPIC_API_KEY",
|
|
|
475 |
}
|
476 |
|
477 |
status = []
|
478 |
+
available_providers = get_available_providers()
|
479 |
+
|
480 |
+
# Show status for all providers
|
481 |
+
for provider, env_var in env_vars.items():
|
482 |
+
if os.getenv(env_var):
|
483 |
+
key = os.getenv(env_var)
|
484 |
+
masked_key = f"{key[:8]}...{key[-4:]}" if len(key) > 12 else "***"
|
485 |
+
model = get_default_model_for_provider(provider)
|
486 |
+
|
487 |
+
# Highlight the selected provider
|
488 |
+
if provider == selected_llm_provider:
|
489 |
+
status.append(f"π― {provider}: {masked_key} (Model: {model}) [ACTIVE]")
|
490 |
+
else:
|
491 |
+
status.append(f"β
{provider}: {masked_key} (Model: {model})")
|
492 |
+
else:
|
493 |
+
model = get_default_model_for_provider(provider)
|
494 |
+
if provider == selected_llm_provider:
|
495 |
+
status.append(f"β {provider}: Not set (Would use: {model}) [SELECTED]")
|
496 |
+
else:
|
497 |
+
status.append(f"β {provider}: Not set (Would use: {model})")
|
498 |
|
499 |
# Show current active model
|
500 |
current_model = os.getenv("MODEL_ID", "Qwen/Qwen2.5-Coder-32B-Instruct")
|
501 |
status.append(f"π€ Current Active Model: {current_model}")
|
502 |
|
503 |
+
# Show summary
|
504 |
+
if available_providers:
|
505 |
+
status.append(f"π Available providers: {', '.join(available_providers)}")
|
506 |
+
else:
|
507 |
+
status.append("β οΈ No API keys detected in environment")
|
508 |
+
|
509 |
return "\n".join(status)
|
510 |
|
511 |
|
|
|
687 |
label="Current API Key Status",
|
688 |
value=get_api_key_status(),
|
689 |
interactive=False,
|
690 |
+
lines=6,
|
691 |
+
max_lines=8,
|
692 |
)
|
693 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
694 |
gr.Markdown("---")
|
695 |
|
696 |
+
# LLM Token with Provider Selection (now includes Hugging Face)
|
697 |
with gr.Row():
|
698 |
llm_provider = gr.Dropdown(
|
699 |
label="LLM Provider",
|
700 |
+
choices=[
|
701 |
+
"Anthropic",
|
702 |
+
"OpenAI",
|
703 |
+
"Mistral",
|
704 |
+
"SambaNova",
|
705 |
+
"Hugging Face",
|
706 |
+
],
|
707 |
+
value=get_default_provider(),
|
708 |
scale=1,
|
709 |
)
|
710 |
llm_token = gr.Textbox(
|
711 |
+
label="API Key",
|
712 |
placeholder="Enter your API key...",
|
713 |
type="password",
|
714 |
scale=3,
|
|
|
726 |
file_uploads_log = gr.State([])
|
727 |
|
728 |
# Set up event handlers for API key saving
|
729 |
+
def save_and_update_status(provider, api_key, session_state=None):
|
|
|
|
|
730 |
message = save_api_key(provider, api_key)
|
731 |
+
status = get_api_key_status(provider)
|
732 |
|
733 |
+
# For non-Hugging Face providers, recreate the agent
|
734 |
if provider != "Hugging Face" and session_state is not None:
|
735 |
agent_message = self.recreate_agent_with_new_model(
|
736 |
session_state, provider
|
|
|
740 |
|
741 |
return message, status, "" # Clear the input field
|
742 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
743 |
llm_save_btn.click(
|
744 |
lambda provider, key, sess_state: save_and_update_status(
|
745 |
+
provider, key, sess_state
|
746 |
),
|
747 |
inputs=[llm_provider, llm_token, session_state],
|
748 |
outputs=[api_message, api_status, llm_token],
|
749 |
).then(lambda: gr.Textbox(visible=True), outputs=[api_message])
|
750 |
|
|
|
|
|
|
|
|
|
|
|
|
|
751 |
# Update status when LLM provider dropdown changes
|
752 |
llm_provider.change(
|
753 |
fn=get_api_key_status, inputs=[llm_provider], outputs=[api_status]
|