Update agents.py

agents.py CHANGED
@@ -49,7 +49,7 @@ class Llama2:
         self.cont = st.empty()
         self.status = self.cont.status(label="Fireworks Llama2", state="complete", expanded=False)
 
-    async def chatFireworks(self,
+    async def chatFireworks(self, instruction, question):
 
         fireworks.client.api_key = self.fireworksAPI
 
@@ -71,13 +71,18 @@ class Llama2:
             else:
                 generated_responses.append(message[2])
 
+        # Create a list of message dictionaries for the conversation history
+        conversation_history = []
+        for user_input, generated_response in zip(past_user_inputs, generated_responses):
+            conversation_history.append({"role": "user", "content": str(user_input)})
+            conversation_history.append({"role": "assistant", "content": str(generated_response)})
+
         # Prepare data to send to the chatgpt-api.shn.hk
         response = fireworks.client.ChatCompletion.create(
             model="accounts/fireworks/models/llama-v2-7b-chat",
             messages=[
-                {"role": "system", "content":
-
-                *[{"role": "assistant", "content": response} for response in generated_responses],
+                {"role": "system", "content": instruction},
+                conversation_history,
                 {"role": "user", "content": question}
             ],
             stream=False,
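Note: as committed, conversation_history is placed into messages as one nested list element rather than as a sequence of message dicts. A minimal fix sketch, not part of the commit, assuming the Fireworks client expects a flat list of {"role", "content"} dicts:

    # Sketch: unpack the history with * so the client receives flat
    # message dicts instead of a nested list.
    response = fireworks.client.ChatCompletion.create(
        model="accounts/fireworks/models/llama-v2-7b-chat",
        messages=[
            {"role": "system", "content": instruction},
            *conversation_history,
            {"role": "user", "content": question}
        ],
        stream=False,
    )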
@@ -524,6 +529,12 @@ class Copilot:
             else:
                 generated_responses.append(message[2])
 
+        # Create a list of message dictionaries for the conversation history
+        conversation_history = []
+        for user_input, generated_response in zip(past_user_inputs, generated_responses):
+            conversation_history.append({"role": "user", "content": str(user_input)})
+            conversation_history.append({"role": "assistant", "content": str(generated_response)})
+
         response = await g4f.ChatCompletion.create_async(
             model=g4f.models.gpt_4,
             provider=g4f.Provider.Bing,
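Note: this same history-building loop is added almost verbatim to Llama2, Copilot, ChatGPT, Claude3, and ForefrontAI. A module-level helper would keep the copies in sync; build_conversation_history is a hypothetical name, not in the commit:

    # Hypothetical shared helper for the loop each class repeats inline.
    def build_conversation_history(past_user_inputs, generated_responses):
        conversation_history = []
        for user_input, generated_response in zip(past_user_inputs, generated_responses):
            conversation_history.append({"role": "user", "content": str(user_input)})
            conversation_history.append({"role": "assistant", "content": str(generated_response)})
        return conversation_history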
@@ -533,7 +544,7 @@ class Copilot:
                 *[{"role": "assistant", "content": message} for message in generated_responses],
                 {"role": "user", "content": question}
             ])
-
+
         answer = f"Bing/Copilot: {response}"
         print(answer)
         return answer
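Note: within Copilot, the new conversation_history list is built but the create_async call still rebuilds the history from the two comprehensions, leaving the list unused. A sketch reusing it, assuming the call opens with the system instruction as in the other classes:

    # Sketch, not part of the commit: feed the prepared history to the call.
    response = await g4f.ChatCompletion.create_async(
        model=g4f.models.gpt_4,
        provider=g4f.Provider.Bing,
        messages=[
            {"role": "system", "content": instruction},
            *conversation_history,
            {"role": "user", "content": question}
        ])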
@@ -972,13 +983,19 @@ class ChatGPT:
             else:
                 generated_responses.append(message[2])
 
+        # Create a list of message dictionaries for the conversation history
+        conversation_history = []
+        for user_input, generated_response in zip(past_user_inputs, generated_responses):
+            conversation_history.append({"role": "user", "content": str(user_input)})
+            conversation_history.append({"role": "assistant", "content": str(generated_response)})
+
         response = await g4f.ChatCompletion.create_async(
             model="gpt-3.5-turbo",
-            provider=g4f.Provider.
+            provider=g4f.Provider.ChatgptX,
             messages=[
                 {"role": "system", "content": instruction},
-                *[{"role": "user", "content": message} for message in past_user_inputs],
-                *[{"role": "assistant", "content": message} for message in generated_responses],
+                *[{"role": "user", "content": str(message)} for message in past_user_inputs],
+                *[{"role": "assistant", "content": str(message)} for message in generated_responses],
                 {"role": "user", "content": question}
             ])
 
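Note: pinning a single free g4f provider (here ChatgptX) is fragile, since provider availability changes often. A hedged sketch that falls back across providers; the provider list is illustrative, drawn from names already used in this file:

    # Sketch: try providers in order until one answers.
    for provider in (g4f.Provider.ChatgptX, g4f.Provider.Bing):
        try:
            response = await g4f.ChatCompletion.create_async(
                model="gpt-3.5-turbo",
                provider=provider,
                messages=[
                    {"role": "system", "content": instruction},
                    *[{"role": "user", "content": str(message)} for message in past_user_inputs],
                    *[{"role": "assistant", "content": str(message)} for message in generated_responses],
                    {"role": "user", "content": question}
                ])
            break
        except Exception as e:
            print(f"Provider {provider} failed: {e}")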
@@ -1009,7 +1026,13 @@ class ChatGPT:
                     past_user_inputs.append(message[2])
                 else:
                     generated_responses.append(message[2])
-
+
+            # Create a list of message dictionaries for the conversation history
+            conversation_history = []
+            for user_input, generated_response in zip(past_user_inputs, generated_responses):
+                conversation_history.append({"role": "user", "content": str(user_input)})
+                conversation_history.append({"role": "assistant", "content": str(generated_response)})
+
             url = f"https://api.pawan.krd/v1/chat/completions"
 
             headers = {
@@ -1021,14 +1044,15 @@ class ChatGPT:
                 "max_tokens": 2000,
                 "messages": [
                     {"role": "system", "content": instruction},
-                    {"role": "user", "content": str(past_user_inputs
-                    {"role": "assistant", "content": str(generated_responses
+                    *[{"role": "user", "content": str(message)} for message in past_user_inputs],
+                    *[{"role": "assistant", "content": str(message)} for message in generated_responses],
                     {"role": "user", "content": question}
                 ]
             }
 
             response = requests.request("POST", url, json=payload, headers=headers)
             response_data = response.json()
+            print(response_data)
             generated_answer = response_data["choices"][0]["message"]["content"]
             answer = f"GPT-3,5: {generated_answer}"
             print(answer)
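Note: the added print(response_data) helps debugging, but indexing response_data["choices"] still raises a bare KeyError whenever the pawan.krd endpoint returns an error payload. A guard sketch, assuming an OpenAI-style error body on failure:

    # Sketch: fail with context instead of a KeyError.
    response = requests.request("POST", url, json=payload, headers=headers)
    response.raise_for_status()
    response_data = response.json()
    if "choices" not in response_data:
        raise RuntimeError(f"Unexpected API reply: {response_data}")
    generated_answer = response_data["choices"][0]["message"]["content"]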
@@ -1410,13 +1434,13 @@ class ChatGPT:
 
     async def askLlama(self, question):
         api = "WZGOkHQbZULIzA6u83kyLGBKPigs1HmK9Ec8DEKmGOtu45zx"
-        llama =
+        llama = Llama2(api)
         response = await llama.handleInput(question)
         print(response)
         return response
 
     async def askBing(self, question):
-        bing =
+        bing = Copilot()
         response = await bing.handleInput(question)
         print(response)
         return response
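Note: the Fireworks key is hardcoded here (and repeated in CharacterAI, Chaindesk, and Flowise below); a key committed to a public Space should be treated as leaked and rotated. A sketch reading it from the environment instead; FIREWORKS_API_KEY is an assumed variable name:

    import os

    async def askLlama(self, question):
        api = os.environ["FIREWORKS_API_KEY"]  # assumed env var, not the committed literal
        llama = Llama2(api)
        response = await llama.handleInput(question)
        print(response)
        return response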
@@ -1470,20 +1494,25 @@ class Claude3:
 
             for message in messages:
                 if message[1] == 'client':
-                    past_user_inputs.append(message[2])
+                    past_user_inputs.append(str(message[2]))
                 else:
-                    generated_responses.append(message[2])
+                    generated_responses.append(str(message[2]))
+
+            # Create a list of message dictionaries for the conversation history
+            conversation_history = []
+            for user_input, generated_response in zip(past_user_inputs, generated_responses):
+                conversation_history.append({"role": "user", "content": [{"type": "text", "text": str(user_input)}]})
+                conversation_history.append({"role": "assistant", "content": [{"type": "text", "text": str(generated_response)}]})
+
+            # Add the current question to the conversation history
+            conversation_history.append({"role": "user", "content": [{"type": "text", "text": question}]})
 
             message = client.messages.create(
                 model="claude-3-opus-20240229",
                 max_tokens=2500,
                 temperature=0,
-                system
-                messages=
-                {"role": "user", "content": [{"type": "text", "text": str(past_user_inputs[-1])}]},
-                {"role": "assistant", "content": [{"type": "text", "text": str(generated_responses[-1])}]},
-                {"role": "user", "content": [{"type": "text", "text": question}]}
-                ]
+                system=instruction,
+                messages=conversation_history
             )
 
             print(message.content)
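Note: the Anthropic Messages API expects user and assistant roles to alternate, starting with user. zip() keeps this history paired, but if the stored messages ever arrive out of order, consecutive same-role turns get the request rejected. A defensive sketch that merges such turns before sending:

    # Sketch: collapse consecutive same-role messages into one turn.
    def merge_alternating(history):
        merged = []
        for msg in history:
            if merged and merged[-1]["role"] == msg["role"]:
                merged[-1]["content"].extend(msg["content"])
            else:
                merged.append(msg)
        return merged

    message = client.messages.create(
        model="claude-3-opus-20240229",
        max_tokens=2500,
        temperature=0,
        system=instruction,
        messages=merge_alternating(conversation_history)
    )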
@@ -1492,7 +1521,7 @@ class Claude3:
 
         except Exception as e:
             print(f"Error: {e}")
-
+
     async def handlerClaude(self, websocket):
         self.stat.empty()
         self.cont.empty()
@@ -1915,18 +1944,19 @@ class ForefrontAI:
             else:
                 generated_responses.append(message[2])
 
-
-
-
-
+        # Create a list of message dictionaries for the conversation history
+        conversation_history = []
+        for user_input, generated_response in zip(past_user_inputs, generated_responses):
+            conversation_history.append({"role": "user", "content": str(user_input)})
+            conversation_history.append({"role": "assistant", "content": str(generated_response)})
 
         # Construct the message sequence for the chat model
         response = ff.chat.completions.create(
             messages=[
-
-
-
-
+                {"role": "system", "content": instruction},
+                *[{"role": "user", "content": message} for message in past_user_inputs],
+                *[{"role": "assistant", "content": message} for message in generated_responses],
+                {"role": "user", "content": question}
             ],
             stream=False,
             model="forefront/neural-chat-7b-v3-1-chatml", # Replace with the actual model name
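Note: here too the freshly built conversation_history goes unused; the ff.chat.completions.create call rebuilds the history from comprehensions. Separately, zip() silently drops the newest user input whenever past_user_inputs runs one entry longer than generated_responses. A sketch that keeps it, using itertools.zip_longest:

    from itertools import zip_longest

    # Sketch: keep an unpaired trailing user input instead of dropping it.
    conversation_history = []
    for user_input, generated_response in zip_longest(past_user_inputs, generated_responses):
        if user_input is not None:
            conversation_history.append({"role": "user", "content": str(user_input)})
        if generated_response is not None:
            conversation_history.append({"role": "assistant", "content": str(generated_response)})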
@@ -2699,13 +2729,13 @@ class CharacterAI:
 
     async def askLlama(self, question):
         api = "WZGOkHQbZULIzA6u83kyLGBKPigs1HmK9Ec8DEKmGOtu45zx"
-        llama =
+        llama = Llama2(api)
         response = await llama.handleInput(question)
         print(response)
         return response
 
     async def askBing(self, question):
-        bing =
+        bing = Copilot()
         response = await bing.handleInput(question)
         print(response)
         return response
@@ -3110,13 +3140,13 @@ class Chaindesk:
 
     async def askLlama(self, question):
         api = "WZGOkHQbZULIzA6u83kyLGBKPigs1HmK9Ec8DEKmGOtu45zx"
-        llama =
+        llama = Llama2(api)
         response = await llama.handleInput(question)
         print(response)
         return response
 
     async def askBing(self, question):
-        bing =
+        bing = Copilot()
         response = await bing.handleInput(question)
         print(response)
         return response
@@ -3494,7 +3524,7 @@ class Flowise:
             return characterID
 
         else:
-            response = f"You didn't choose any character to establish a connection with. Do you want try again or maybe use some other copmmand-fuunction?"
+            response = f"You didn't choose any character to establish a connection with. Do you want try once again or maybe use some other copmmand-fuunction?"
            print(response)
            await self.handleInput(respoonse)
 
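Note: this branch still references the undefined name respoonse (a NameError at runtime), the f-string has no placeholders, and the prompt text carries typos ("copmmand-fuunction"). A corrected sketch:

    else:
        response = "You didn't choose any character to establish a connection with. Do you want to try once again, or maybe use some other command-function?"
        print(response)
        await self.handleInput(response)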
@@ -3521,13 +3551,13 @@ class Flowise:
 
     async def askLlama(self, question):
         api = "WZGOkHQbZULIzA6u83kyLGBKPigs1HmK9Ec8DEKmGOtu45zx"
-        llama =
+        llama = Llama2(api)
         response = await llama.handleInput(question)
         print(response)
         return response
 
     async def askBing(self, question):
-        bing =
+        bing = Copilot()
         response = await bing.handleInput(question)
         print(response)
         return response
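Note: the identical askLlama/askBing pair now exists in ChatGPT, CharacterAI, Chaindesk, and Flowise. A hypothetical mixin would define the pair once; AskAgentsMixin is an assumed name, and the key again belongs in the environment as noted above:

    class AskAgentsMixin:
        async def askLlama(self, question):
            api = os.environ["FIREWORKS_API_KEY"]  # assumed env var
            llama = Llama2(api)
            response = await llama.handleInput(question)
            print(response)
            return response

        async def askBing(self, question):
            bing = Copilot()
            response = await bing.handleInput(question)
            print(response)
            return response

    class Flowise(AskAgentsMixin):  # likewise ChatGPT, CharacterAI, Chaindesk
        ...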