Upload 34 files
- app.py +1 -1
- coursecrafter/agents/__pycache__/llm_client.cpython-311.pyc +0 -0
- coursecrafter/agents/__pycache__/simple_course_agent.cpython-311.pyc +0 -0
- coursecrafter/agents/llm_client.py +34 -0
- coursecrafter/agents/simple_course_agent.py +7 -0
- coursecrafter/ui/__pycache__/gradio_app.cpython-311.pyc +0 -0
- coursecrafter/ui/gradio_app.py +48 -16
- coursecrafter/utils/__pycache__/config.cpython-311.pyc +0 -0
- coursecrafter/utils/config.py +12 -2
app.py
CHANGED
@@ -45,7 +45,7 @@ def main():
     launch_kwargs = {
         "server_name": "0.0.0.0",
         "server_port": 7860,
-        "share":
+        "share": False,
         "show_error": True,
         "quiet": False
     }
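Note: the fix pins `share` to False so Gradio does not try to open a public tunnel. As a minimal sketch of how these kwargs are typically consumed (the bare `gr.Blocks()` and the `demo` name are placeholders, not part of this commit):

    # Hypothetical sketch; the real interface is built in coursecrafter/ui/gradio_app.py.
    import gradio as gr

    def main():
        demo = gr.Blocks()
        launch_kwargs = {
            "server_name": "0.0.0.0",   # listen on all interfaces (e.g. inside a container or Space)
            "server_port": 7860,
            "share": False,             # no public Gradio tunnel; the host already exposes the port
            "show_error": True,
            "quiet": False,
        }
        demo.launch(**launch_kwargs)

    if __name__ == "__main__":
        main()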
coursecrafter/agents/__pycache__/llm_client.cpython-311.pyc
CHANGED
Binary files a/coursecrafter/agents/__pycache__/llm_client.cpython-311.pyc and b/coursecrafter/agents/__pycache__/llm_client.cpython-311.pyc differ

coursecrafter/agents/__pycache__/simple_course_agent.cpython-311.pyc
CHANGED
Binary files a/coursecrafter/agents/__pycache__/simple_course_agent.cpython-311.pyc and b/coursecrafter/agents/__pycache__/simple_course_agent.cpython-311.pyc differ
coursecrafter/agents/llm_client.py
CHANGED
@@ -8,6 +8,7 @@ import json
 from typing import Dict, List, Any, Optional, AsyncGenerator
 from dataclasses import dataclass
 from abc import ABC, abstractmethod
+import os
 
 import openai
 import anthropic
@@ -249,6 +250,39 @@ class LlmClient:
         except Exception as e:
             print(f"❌ Failed to initialize {provider} client: {e}")
 
+    def update_provider_config(self, provider: str, api_key: str = None, **kwargs):
+        """Update configuration for a specific provider and reinitialize client"""
+
+        # Update environment variables
+        if provider == "openai" and api_key:
+            os.environ["OPENAI_API_KEY"] = api_key
+        elif provider == "anthropic" and api_key:
+            os.environ["ANTHROPIC_API_KEY"] = api_key
+        elif provider == "google" and api_key:
+            os.environ["GOOGLE_API_KEY"] = api_key
+        elif provider == "openai_compatible":
+            if api_key:
+                os.environ["OPENAI_COMPATIBLE_API_KEY"] = api_key
+            if kwargs.get("base_url"):
+                os.environ["OPENAI_COMPATIBLE_BASE_URL"] = kwargs["base_url"]
+            if kwargs.get("model"):
+                os.environ["OPENAI_COMPATIBLE_MODEL"] = kwargs["model"]
+
+        # Reinitialize the specific client
+        try:
+            if provider in ["openai", "openai_compatible"]:
+                self.clients[provider] = OpenAIClient(provider)
+            elif provider == "anthropic":
+                self.clients[provider] = AnthropicClient()
+            elif provider == "google":
+                self.clients[provider] = GoogleClient()
+
+            print(f"✅ Updated and reinitialized {provider} client")
+            return True
+        except Exception as e:
+            print(f"❌ Failed to reinitialize {provider} client: {e}")
+            return False
+
     def get_available_providers(self) -> List[LLMProvider]:
         """Get list of available providers"""
         return list(self.clients.keys())
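The new `update_provider_config` method lets callers swap credentials at runtime without constructing a new client. A hedged usage sketch; the import path mirrors the file location, and the key, URL, and model values are placeholders:

    # Hypothetical usage of the new method (not part of this commit).
    from coursecrafter.agents.llm_client import LlmClient

    client = LlmClient()
    ok = client.update_provider_config(
        "openai_compatible",
        api_key="sk-placeholder",
        base_url="http://localhost:8000/v1",
        model="my-local-model",
    )
    if ok:
        # The reconfigured provider should now appear among the available clients.
        print(client.get_available_providers())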
coursecrafter/agents/simple_course_agent.py
CHANGED
@@ -106,6 +106,13 @@ For quizzes, return:
 
 Always strive to create courses that are not just informative, but are easy to understand, engaging, learning experiences."""
 
+    def update_provider_config(self, provider: str, api_key: str = None, **kwargs):
+        """Update provider configuration and reinitialize client"""
+        success = self.llm_client.update_provider_config(provider, api_key, **kwargs)
+        if success:
+            self.default_provider = provider
+        return success
+
     async def generate_course(
         self,
         topic: str,
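The agent-level wrapper simply delegates to the LLM client and, on success, switches the default provider. A hypothetical usage sketch; the import path mirrors the file location, and `generate_course` almost certainly takes more arguments than the topic shown in this diff:

    # Hypothetical usage (not part of this commit); placeholder key and topic.
    import asyncio
    from coursecrafter.agents.simple_course_agent import SimpleCourseAgent

    agent = SimpleCourseAgent()
    if agent.update_provider_config("anthropic", api_key="sk-ant-placeholder"):
        # Additional generate_course parameters are omitted here for brevity.
        course = asyncio.run(agent.generate_course(topic="Introduction to Python"))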
coursecrafter/ui/__pycache__/gradio_app.cpython-311.pyc
CHANGED
Binary files a/coursecrafter/ui/__pycache__/gradio_app.cpython-311.pyc and b/coursecrafter/ui/__pycache__/gradio_app.cpython-311.pyc differ
coursecrafter/ui/gradio_app.py
CHANGED
@@ -775,8 +775,10 @@ def create_coursecrafter_interface() -> gr.Blocks:
 
     # Provider change handler to show/hide OpenAI-compatible fields
     def on_provider_change(provider):
-
-
+        if provider == "openai_compatible":
+            return gr.update(visible=True)
+        else:
+            return gr.update(visible=False)
 
     # Event handlers
     async def generate_course_wrapper(topic: str, difficulty: str, lessons: int, provider: str, api_key: str, endpoint_url: str, model_name: str, progress=gr.Progress()):
@@ -785,28 +787,28 @@ def create_coursecrafter_interface() -> gr.Blocks:
            return (
                 "<div class='error'>❌ Please enter a topic for your course.</div>",
                 "", "",
-                False, [], "<div class='image-details'>Error loading images</div>"
+                gr.update(visible=False), [], "<div class='image-details'>Error loading images</div>"
             )
 
         if not api_key.strip() and provider != "openai_compatible":
             return (
                 "<div class='error'>❌ Please enter your API key for the selected LLM provider.</div>",
                 "", "",
-                False, [], "<div class='image-details'>Error loading images</div>"
+                gr.update(visible=False), [], "<div class='image-details'>Error loading images</div>"
             )
 
         if provider == "openai_compatible" and not endpoint_url.strip():
             return (
                 "<div class='error'>❌ Please enter the endpoint URL for OpenAI-compatible provider.</div>",
                 "", "",
-                False, [], "<div class='image-details'>Error loading images</div>"
+                gr.update(visible=False), [], "<div class='image-details'>Error loading images</div>"
             )
 
         if provider == "openai_compatible" and not model_name.strip():
             return (
                 "<div class='error'>❌ Please enter the model name for OpenAI-compatible provider.</div>",
                 "", "",
-                False, [], "<div class='image-details'>Error loading images</div>"
+                gr.update(visible=False), [], "<div class='image-details'>Error loading images</div>"
             )
 
         try:
@@ -827,13 +829,37 @@ def create_coursecrafter_interface() -> gr.Blocks:
             os.environ["OPENAI_COMPATIBLE_BASE_URL"] = endpoint_url
             os.environ["OPENAI_COMPATIBLE_MODEL"] = model_name
 
-            #
+            # IMPORTANT: Create a fresh agent instance to pick up the new environment variables
+            # This ensures the LlmClient reinitializes with the updated API keys
             agent = SimpleCourseAgent()
-
-
+
+            # Use the new dynamic configuration method to update provider settings
+            config_kwargs = {}
+            if provider == "openai_compatible":
+                config_kwargs["base_url"] = endpoint_url
+                config_kwargs["model"] = model_name
+
+            # Update provider configuration dynamically
+            config_success = agent.update_provider_config(provider, api_key, **config_kwargs)
+            if not config_success:
+                return (
+                    f"<div class='error'>❌ Failed to configure provider '{provider}'. Please check your API key and settings.</div>",
+                    "", "",
+                    gr.update(visible=False), [], "<div class='image-details'>Error loading images</div>"
+                )
+
             course_context["agent"] = agent
             course_context["topic"] = topic
 
+            # Verify the provider is available with the new configuration
+            available_providers = agent.get_available_providers()
+            if provider not in available_providers:
+                return (
+                    f"<div class='error'>❌ Provider '{provider}' is not available after configuration. Please check your API key and configuration.</div>",
+                    "", "",
+                    gr.update(visible=False), [], "<div class='image-details'>Error loading images</div>"
+                )
+
             progress(0.1, desc="⚙️ Setting up generation options...")
 
             # Create generation options
@@ -853,7 +879,7 @@ def create_coursecrafter_interface() -> gr.Blocks:
                 return (
                     "<div class='error'>❌ No LLM providers available. Please check your API keys.</div>",
                     "", "",
-                    False, [], "<div class='image-details'>Error loading images</div>"
+                    gr.update(visible=False), [], "<div class='image-details'>Error loading images</div>"
                 )
 
             progress(0.2, desc="🚀 Starting course generation...")
@@ -953,8 +979,14 @@ def create_coursecrafter_interface() -> gr.Blocks:
             quiz_data = course_data.get("quiz", {})
             quizzes_html = format_quiz(quiz_data)
 
-            # Show quiz button if quiz exists
-            quiz_btn_visible = bool(quiz_data and quiz_data.get("questions"))
+            # Show quiz button if quiz exists - be more permissive to ensure it shows
+            quiz_btn_visible = bool(quiz_data and (quiz_data.get("questions") or len(str(quiz_data)) > 50))
+            print(f"🎯 Quiz button visibility: {quiz_btn_visible} (quiz_data: {bool(quiz_data)}, questions: {bool(quiz_data.get('questions') if quiz_data else False)})")
+
+            # Force quiz button to be visible if we have any quiz content
+            if quiz_data and not quiz_btn_visible:
+                print("⚠️ Forcing quiz button to be visible due to quiz data presence")
+                quiz_btn_visible = True
 
             progress(0.98, desc="🖼️ Processing images for gallery...")
 
@@ -1210,7 +1242,7 @@ def create_coursecrafter_interface() -> gr.Blocks:
 
                 return (
                     lessons_html, flashcards_html, quizzes_html,
-                    quiz_btn_visible, images, image_details_html
+                    gr.update(visible=quiz_btn_visible), images, image_details_html
                 )
             else:
                 quiz_btn_visible = False
@@ -1218,7 +1250,7 @@ def create_coursecrafter_interface() -> gr.Blocks:
 
                 return (
                     "", "", "",
-                    quiz_btn_visible, [], "<div class='image-details'>No images available</div>"
+                    gr.update(visible=quiz_btn_visible), [], "<div class='image-details'>No images available</div>"
                 )
 
         except Exception as e:
@@ -1227,13 +1259,13 @@ def create_coursecrafter_interface() -> gr.Blocks:
             print(f"Error in course generation: {error_details}")
             return (
                 "", "", "",
-                False, [], "<div class='image-details'>Error loading images</div>"
+                gr.update(visible=False), [], "<div class='image-details'>Error loading images</div>"
             )
 
     def handle_quiz_submit():
         """Handle quiz submission using client-side processing"""
         # This function will be replaced by client-side JavaScript
-        return
+        return gr.update()
 
     async def handle_chat(message: str, current_chat: str):
         """Handle chat messages for answering questions about the course content"""
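The new `on_provider_change` handler returns a single `gr.update(...)`, so it is presumably wired to one output component. A hedged wiring sketch inside the existing Blocks context; the component names below are assumptions, not taken from this commit:

    # Hypothetical wiring sketch (not part of this commit).
    import gradio as gr

    with gr.Blocks() as demo:
        provider_dropdown = gr.Dropdown(
            choices=["openai", "anthropic", "google", "openai_compatible"],
            value="openai",
            label="LLM Provider",
        )
        with gr.Group(visible=False) as openai_compatible_group:
            endpoint_url = gr.Textbox(label="Endpoint URL")
            model_name = gr.Textbox(label="Model Name")

        # The single returned gr.update() maps onto the single output component.
        provider_dropdown.change(
            on_provider_change,
            inputs=provider_dropdown,
            outputs=openai_compatible_group,
        )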
coursecrafter/utils/__pycache__/config.cpython-311.pyc
CHANGED
Binary files a/coursecrafter/utils/__pycache__/config.cpython-311.pyc and b/coursecrafter/utils/__pycache__/config.cpython-311.pyc differ
coursecrafter/utils/config.py
CHANGED
@@ -163,6 +163,9 @@ class Config:
 
     def get_llm_config(self, provider: LLMProvider) -> LLMProviderConfig:
         """Get configuration for a specific LLM provider"""
+        # Reload config to pick up any environment variable changes
+        self._config = self._load_default_config()
+
         if provider not in self._config["llm_providers"]:
             raise ValueError(f"Unknown LLM provider: {provider}")
 
@@ -171,6 +174,9 @@ class Config:
 
     def get_available_llm_providers(self) -> List[LLMProvider]:
         """Get list of available LLM providers with API keys"""
+        # Reload config to pick up any environment variable changes
+        self._config = self._load_default_config()
+
         available = []
         for provider, config in self._config["llm_providers"].items():
             if provider == "openai_compatible":
@@ -185,6 +191,9 @@ class Config:
 
     def get_default_llm_provider(self) -> LLMProvider:
         """Get the default LLM provider, falling back to first available if not configured"""
+        # Reload config to pick up any environment variable changes
+        self._config = self._load_default_config()
+
         default_provider = self._config["system"]["default_llm_provider"]
         available_providers = self.get_available_llm_providers()
 
@@ -197,8 +206,9 @@ class Config:
             print(f"⚠️ Default provider '{default_provider}' not configured, using '{available_providers[0]}'")
             return available_providers[0]
 
-        # If no providers are available,
-
+        # If no providers are available, return a fallback instead of raising an error
+        print("⚠️ Warning: No LLM providers are configured. Returning 'google' as fallback.")
+        return "google"  # Return a fallback provider that can be configured later
 
     def get_image_generation_config(self) -> Dict[str, Any]:
         """Get image generation configuration"""