"""
This class contains code for setting up a conversation initialization
with agents. It includes the use of Autogens Assistant agent for writing
code and User Proxy agent for executing actions.
Written by: Antoine Ross - October 2023
"""
import logging
import os
import shutil

import autogen
from autogen import AssistantAgent, UserProxyAgent


class MultiAgent:
    """
    MultiAgent class for managing Autogen chat agents and related operations.
    """

    def __init__(self, **kwargs):
        """
        Initialize MultiAgent with coding assistant and runner agents.
        """
# self.model = kwargs.get('model', 'vicuna')
# self.model = kwargs.get('model', 'gpt-3.5-turbo-16k')
self.model = kwargs.get('model', 'gpt-4')
self.work_dir = kwargs.get('work_dir', None)
self.temperature = kwargs.get('temperature', 0.2)
self.env_path = '.env'
self.config_list = self.get_config_list()

    def clear_history(self, clear_previous_work=False):
        """
        Clean up the cache directory to avoid conversation spillover between models.

        Args:
            clear_previous_work (bool, optional): Whether to also clear the previous work directory. Defaults to False.
        """
if os.path.exists('.cache') and os.path.isdir('.cache'):
print('Deleting cache...')
shutil.rmtree('.cache')
if clear_previous_work:
self.clear_previous_work()

    def clear_previous_work(self):
        """
        Clean up the previous work directory.
        """
if os.path.exists('.coding') and os.path.isdir('.coding'):
print('Deleting previous work...')
shutil.rmtree('.coding')

    def get_config_list(self):
        """
        Get a list of configuration options for Autogen.

        Returns:
            list: List of configuration options.
        """
        # For open-source models:
# config_list = [
# {
# "model": "mistral",
# # "model": "ollama/llama2",
# "api_base": "http://localhost:1236/v1",
# "api_type": "open_ai",
# "api_key": "NULL"
# }
# ]
config_list = autogen.config_list_from_dotenv(
dotenv_file_path=self.env_path,
model_api_key_map={
"gpt-3.5-turbo-16k": "OPENAI_API_KEY",
"gpt-3.5-turbo": "OPENAI_API_KEY",
"gpt-4": "OPENAI_API_KEY",
},
filter_dict={
"model": {
self.model,
# "gpt-4"
}
}
)
return config_list
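
    # A sketch of what get_config_list() expects and returns (an assumption
    # based on pyautogen's config_list_from_dotenv, not part of the original
    # code). With a .env file containing:
    #
    #     OPENAI_API_KEY=sk-...
    #
    # the call yields a list of model configs, e.g.:
    #
    #     [{"model": "gpt-4", "api_key": "sk-..."}]
    #
    # filter_dict narrows the list to the model selected in __init__.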
    def instantiate_agents(self):
        """Create and return the coding assistant and coding runner agents."""
        logging.info("Initializing Agents")
coding_assistant_config = {
"name": "coding_assistant",
"llm_config": {
"request_timeout": 1000,
"seed": 42,
"config_list": self.config_list,
"temperature": self.temperature,
}
}
coding_runner_config = {
"name": "coding_runner",
"human_input_mode": "NEVER",
"max_consecutive_auto_reply": 5,
"is_termination_msg": self.is_termination_message,
"code_execution_config": {
"work_dir": "web",
"use_docker": False
},
"llm_config": {
"request_timeout": 1000,
"seed": 42,
"config_list": self.config_list,
"temperature": self.temperature,
},
"system_message": """Reply TERMINATE if the task has been solved at full satisfaction. Otherwise, reply CONTINUE, or the reason why the task is not solved yet.""",
}
coding_assistant = AssistantAgent(**coding_assistant_config)
coding_runner = UserProxyAgent(**coding_runner_config)
return coding_assistant, coding_runner
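
    # How the pair cooperates (a summary of pyautogen's standard two-agent
    # loop, stated here as an assumption): the assistant proposes code, the
    # runner extracts and executes each code block in work_dir (use_docker is
    # False, so execution is local), and replies with the output. The exchange
    # repeats until the assistant says TERMINATE or max_consecutive_auto_reply
    # (5) is exhausted.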

    def is_termination_message(self, x):
        """Return True when a message's content ends with "TERMINATE"."""
        content = x.get("content", "")
        return bool(content) and content.rstrip().endswith("TERMINATE")