Implemented a background service for async launching of registered (in-app) library functions
- .gitignore +2 -0
- app.py +36 -6
- background_service.ipynb +84 -0
- library.ipynb +75 -0
- test.ipynb +169 -1
.gitignore
ADDED
@@ -0,0 +1,2 @@
+test.ipynb
+allkeys.py
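(allkeys.py holds the OPENAIKEY and GEMENIKEY values that test.ipynb imports, so both the keys and the scratch notebook stay out of version control.)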
app.py
CHANGED
@@ -5,6 +5,9 @@ import anvil.server
 import pathlib
 import textwrap
 import google.generativeai as genai
+import import_ipynb
+from library import call_gpt, call_gemini
+from background_service import BackgroundTaskService
 
 anvil.server.connect('PLMOIU5VCGGUOJH2XORIBWV3-ZXZVFLWX7QFIIAF4')
 
@@ -15,14 +18,41 @@ MESSAGED={'title':'API Server',
 tokenizer = AutoTokenizer.from_pretrained('allenai/specter')
 encoder = AutoModel.from_pretrained('allenai/specter')
 
-GOOGLE_API_KEY=os.getenv('GOOGLE_API_KEY')
-genai.configure(api_key=GOOGLE_API_KEY)
+# GOOGLE_API_KEY=os.getenv('GOOGLE_API_KEY')
+# genai.configure(api_key=GOOGLE_API_KEY)
+
+service=BackgroundTaskService(max_tasks=10)
+service.register(call_gpt)
+service.register(call_gemini)
+
+@anvil.server.callable
+def launch(func_name,*args):
+    global service
+    # Launch task
+    task_id = service.launch_task(func_name, *args)
+    print(f"Task launched with ID: {task_id}")
+    return task_id
 
 @anvil.server.callable
-def call_gemini(text):
-    model = genai.GenerativeModel('gemini-pro')
-    response = model.generate_content(text)
-    return response.text
+def poll(task_id):
+    global service
+    # Poll for completion; if not complete return "In Progress" else return result
+    result = service.get_result(task_id)
+    if result=='No such task': return str(result)
+    elif result!='In Progress':
+        del service.results[task_id]
+        if isinstance(result, (int, float, str, list, dict, tuple)):
+            return result
+        else:
+            print(str(result))
+            return str(result)
+    else: return str(result)
+
+# @anvil.server.callable
+# def call_gemini(text):
+#     model = genai.GenerativeModel('gemini-pro')
+#     response = model.generate_content(text)
+#     return response.text
 
 @anvil.server.callable
 def encode_anvil(text):
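The launch/poll pair added above gives uplink clients a fire-and-poll pattern for long-running calls. A minimal client-side sketch (assuming an uplink connection is already open; the run_async helper and its interval parameter are illustrative, not part of this commit):

    import time
    import anvil.server

    def run_async(func_name, *args, interval=1.0):
        # 'launch' queues the registered function and returns a task id
        task_id = anvil.server.call('launch', func_name, *args)
        while True:
            result = anvil.server.call('poll', task_id)
            if result != 'In Progress':
                return result
            time.sleep(interval)  # back off between polls

Because poll deletes a finished result from service.results before returning it, each task id can be redeemed exactly once; a second poll for the same id answers 'No such task'.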
background_service.ipynb
ADDED
@@ -0,0 +1,84 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import threading\n",
+    "import queue\n",
+    "import secrets\n",
+    "import concurrent.futures\n",
+    "from typing import Callable, Any, Dict"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class BackgroundTaskService:\n",
+    "    def __init__(self, max_tasks: int):\n",
+    "        self.max_tasks = max_tasks\n",
+    "        self.task_queue = queue.Queue()\n",
+    "        self.results = {}\n",
+    "        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=max_tasks)\n",
+    "        self.lock = threading.Lock()  # To handle concurrent access to results dictionary\n",
+    "        threading.Thread(target=self._worker, daemon=True).start()\n",
+    "        self.registry={}\n",
+    "    def register(self,func):\n",
+    "        self.registry[func.__name__]=func\n",
+    "    def _worker(self):\n",
+    "        while True:\n",
+    "            task_id, func, args = self.task_queue.get()\n",
+    "            result = self.executor.submit(func, *args).result()\n",
+    "            with self.lock:\n",
+    "                self.results[task_id] = result\n",
+    "\n",
+    "    def launch_task(self, func_name, *args) -> Any:\n",
+    "        func=self.registry[func_name]\n",
+    "        if self.task_queue.qsize() >= self.max_tasks:\n",
+    "            return \"Queue Full\"\n",
+    "        task_id = secrets.token_hex(16)\n",
+    "        self.task_queue.put((task_id, func, args))\n",
+    "        with self.lock:\n",
+    "            self.results[task_id] = \"In Progress\"\n",
+    "        return task_id\n",
+    "\n",
+    "    def get_result(self, task_id) -> Any:\n",
+    "        with self.lock:\n",
+    "            return self.results.get(task_id, \"No such task\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "py310all",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
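One caveat in BackgroundTaskService as committed: _worker calls .result() on each submitted future, which blocks the single dispatcher thread until that task finishes, so queued tasks execute one at a time even though the pool allows max_tasks workers. A sketch of a non-blocking variant (a possible alternative, not what this commit implements) stores each result from a done-callback instead:

    def _worker(self):
        while True:
            task_id, func, args = self.task_queue.get()
            future = self.executor.submit(func, *args)

            def _store(fut, task_id=task_id):
                # record the result (or the raised exception) without blocking the dispatcher
                with self.lock:
                    try:
                        self.results[task_id] = fut.result()
                    except Exception as e:
                        self.results[task_id] = f"Error: {e}"

            future.add_done_callback(_store)

With that change, up to max_tasks launched functions run concurrently, which matters here since call_gpt and call_gemini spend most of their time waiting on network I/O.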
library.ipynb
ADDED
@@ -0,0 +1,75 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import anvil.server\n",
+    "import openai\n",
+    "import pathlib\n",
+    "import textwrap\n",
+    "import google.generativeai as genai"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def call_gemini(text,key):\n",
+    "    # response=f'calling gemini with key {key} and text {text}'\n",
+    "    # return response\n",
+    "    genai.configure(api_key=key)\n",
+    "    model = genai.GenerativeModel('gemini-pro')\n",
+    "    response = model.generate_content(text)\n",
+    "    return response.text"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def call_gpt(prompt,key,model):\n",
+    "    openai.api_key=key\n",
+    "    try:\n",
+    "        messages=[{\"role\": \"system\", \"content\": \"You are a helpful assistant.\"}]\n",
+    "        messages+=[{\"role\": \"user\", \"content\": prompt}]\n",
+    "        completions=openai.chat.completions.create( #for new version >.28\n",
+    "        # completions=openai.ChatCompletion.create(\n",
+    "            model=model,\n",
+    "            messages=messages)\n",
+    "        # prediction=completions['choices'][0]['message']['content']\n",
+    "        prediction=completions.choices[0].message.content.strip() # for new version >.28\n",
+    "    except Exception as e:\n",
+    "        return -1,str(e)\n",
+    "    return 0,prediction"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "py310all",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
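Note the differing conventions: call_gemini returns the response text directly (or raises), while call_gpt traps exceptions and returns a (status, payload) tuple, (0, prediction) on success and (-1, error_message) on failure, which is why test.ipynb prints result[1]. A minimal usage sketch (the prompt string is illustrative; OPENAIKEY comes from the gitignored allkeys.py):

    from allkeys import OPENAIKEY

    status, payload = call_gpt('Say hello', OPENAIKEY, 'gpt-3.5-turbo')
    if status == 0:
        print(payload)            # model prediction
    else:
        print('error:', payload)  # stringified exception from the API call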
test.ipynb
CHANGED
@@ -11,7 +11,9 @@
     "import requests\n",
     "import json\n",
     "from urllib.request import urlretrieve\n",
-    "import pandas as pd"
+    "import pandas as pd\n",
+    "import time\n",
+    "from allkeys import OPENAIKEY, GEMENIKEY"
    ]
   },
   {
@@ -24,6 +26,130 @@
     "anvil.server.connect('PLMOIU5VCGGUOJH2XORIBWV3-ZXZVFLWX7QFIIAF4')"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def fetch_result(task_id):\n",
+    "    while True:\n",
+    "        result=anvil.server.call('poll',task_id)\n",
+    "        if result!='In Progress' or result=='No such task': break\n",
+    "        else: \n",
+    "            time.sleep(1)\n",
+    "            print(result)\n",
+    "    print(result)\n",
+    "    return result"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "text='write a python function to compute the nth digit of pi'\n",
+    "model='gpt-3.5-turbo'"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "task_id=anvil.server.call('launch','call_gemini',text,GEMENIKEY)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "task_id=anvil.server.call('launch','call_gpt',text,OPENAIKEY,model)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fetch_result(task_id)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(result)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(result[1],end='\\n')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import pathlib\n",
+    "import textwrap\n",
+    "from IPython.display import display\n",
+    "from IPython.display import Markdown\n",
+    "\n",
+    "def to_markdown(text):\n",
+    "    text = text.replace('•', '  *')\n",
+    "    return Markdown(textwrap.indent(text, '> ', predicate=lambda _: True))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "prompt='write code that defines a transformer network from scratch in pytorch'"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "response=anvil.server.call('call_gemini',prompt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "anvil.server.call('encode_anvil',prompt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "to_markdown(response)"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -173,6 +299,48 @@
    "source": [
     "df=pd.read_parquet('/tmp/validation_subset_int8.parquet')"
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "import torch.nn as nn\n",
+    "import torch.nn.functional as F\n",
+    "\n",
+    "class Transformer(nn.Module):\n",
+    "    def __init__(self, d_model, nhead, num_encoder_layers, num_decoder_layers, dim_feedforward, dropout=0.1):\n",
+    "        super(Transformer, self).__init__()\n",
+    "        self.transformer = nn.Transformer(d_model, nhead, num_encoder_layers, num_decoder_layers, dim_feedforward, dropout)\n",
+    "\n",
+    "    def forward(self, src, tgt):\n",
+    "        output = self.transformer(src, tgt)\n",
+    "        return output\n",
+    "\n",
+    "# Example usage:\n",
+    "# Define the model parameters\n",
+    "d_model = 512\n",
+    "nhead = 8\n",
+    "num_encoder_layers = 6\n",
+    "num_decoder_layers = 6\n",
+    "dim_feedforward = 2048\n",
+    "dropout = 0.1\n",
+    "\n",
+    "# Initialize the model\n",
+    "model = Transformer(d_model, nhead, num_encoder_layers, num_decoder_layers, dim_feedforward, dropout)\n",
+    "\n",
+    "# Generate some sample data\n",
+    "src = torch.rand(10, 32, 512)\n",
+    "tgt = torch.rand(20, 32, 512)\n",
+    "\n",
+    "# Pass the data through the model\n",
+    "output = model(src, tgt)\n",
+    "\n",
+    "# Print the output shape\n",
+    "print(output.shape)"
+   ]
   }
  ],
  "metadata": {
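Two small notes on the polling cells above. First, the or result=='No such task' clause in fetch_result is unreachable as a separate case, since any such result already fails result!='In Progress'. Second, fetch_result(task_id) is invoked without binding its return value, yet the following cells print a global result that only exists inside the function; assigning the return value makes them work as intended:

    result = fetch_result(task_id)  # bind the return so later cells can use it
    print(result)                   # for call_gpt tasks this is a (status, payload) tuple

Also note that after this commit call_gemini is no longer exposed as a direct @anvil.server.callable in app.py, so the anvil.server.call('call_gemini',prompt) cell would need to go through 'launch'/'poll' instead.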