Update app.py
app.py CHANGED
@@ -212,7 +212,7 @@ def set_seed():
 
 
 
-
+
 with st.sidebar:
 
     st.image("Koya_Presentation-removebg-preview.png")
@@ -236,39 +236,38 @@ with st.sidebar:
     )
     url = "https://drive.google.com/file/d/1eWat34ot3j8onIeKDnJscKalp2oYnn8O/view"
     st.write("check out the paper [here](%s)" % url)
-
-
-footer()
-
-sentence = st.text_input("Please input a sample sentence in the target language")
-
-models = get_model_infos(multilingual=None)
-selected_models = st.multiselect(
-    "Select of number of models you would like to compare", models["id"], max_selections=5
-)
-
-run = st.button("Get Scores")
-if run:
-
-    progress_text = "Computing recommendation Scores"
-    st.write(progress_text)
-    my_bar = st.progress(0)
-
-    scores = {}
-    for index, model_id in enumerate(selected_models):
-        tokenizer = AutoTokenizer.from_pretrained(model_id)
-        model = AutoModelWithLMHead.from_pretrained(model_id)
-        if model_id.startswith("castorini"):
-            tokenizer.model_max_length = 512
-        MLM_MASK_TOKEN = tokenizer.mask_token_id  # [(103, '[MASK]')]
-        MLM_UNK_TOKEN = tokenizer.unk_token_id
-
-        BATCH_SIZE = 1
-        score = get_sense_score_batched(
-            sentence, tokenizer, model, MLM_MASK_TOKEN, MLM_UNK_TOKEN, BATCH_SIZE
-        )
-        scores[model_id] = score
-        my_bar.progress(index + 1 / len(selected_models))
-    scores = sort_dictionary(scores)
-    st.write("Our recommendation is:", scores)
-
+with st.sidebar:
+    footer()
+
+sentence = st.text_input("Please input a sample sentence in the target language")
+
+models = get_model_infos(multilingual=None)
+selected_models = st.multiselect(
+    "Select of number of models you would like to compare", models["id"], max_selections=5
+)
+
+run = st.button("Get Scores")
+if run:
+
+    progress_text = "Computing recommendation Scores"
+    st.write(progress_text)
+    my_bar = st.progress(0)
+
+    scores = {}
+    for index, model_id in enumerate(selected_models):
+        tokenizer = AutoTokenizer.from_pretrained(model_id)
+        model = AutoModelWithLMHead.from_pretrained(model_id)
+        if model_id.startswith("castorini"):
+            tokenizer.model_max_length = 512
+        MLM_MASK_TOKEN = tokenizer.mask_token_id  # [(103, '[MASK]')]
+        MLM_UNK_TOKEN = tokenizer.unk_token_id
+
+        BATCH_SIZE = 1
+        score = get_sense_score_batched(
+            sentence, tokenizer, model, MLM_MASK_TOKEN, MLM_UNK_TOKEN, BATCH_SIZE
+        )
+        scores[model_id] = score
+        my_bar.progress(index + 1 / len(selected_models))
+    scores = sort_dictionary(scores)
+    st.write("Our recommendation is:", scores)
+
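For readers following the diff, the "Get Scores" block added above is the core of the app: for each checkpoint picked in the multiselect it loads a tokenizer and a masked language model, looks up the mask and UNK token ids, scores the input sentence with the app's get_sense_score_batched helper, advances a progress bar, and finally sorts the per-model scores. The sketch below is a rough standalone approximation of that loop, not the app's actual code: pseudo_log_likelihood is an illustrative stand-in for get_sense_score_batched (whose real implementation lives elsewhere in app.py and is not part of this diff), AutoModelForMaskedLM replaces the deprecated AutoModelWithLMHead, and the progress fraction is written as (index + 1) / len(selected_models), since Python's operator precedence makes the diff's index + 1 / len(selected_models) divide before adding.

import torch
import streamlit as st
from transformers import AutoModelForMaskedLM, AutoTokenizer

def pseudo_log_likelihood(sentence, tokenizer, model, mask_token_id):
    # Illustrative stand-in for the app's get_sense_score_batched helper:
    # mask each token in turn and sum the log-probability the model assigns
    # to the original token (masked language model scoring).
    enc = tokenizer(sentence, return_tensors="pt", truncation=True)
    input_ids = enc["input_ids"][0]
    total = 0.0
    with torch.no_grad():
        for pos in range(1, input_ids.size(0) - 1):  # skip special tokens
            masked = input_ids.clone()
            original_id = masked[pos].item()
            masked[pos] = mask_token_id
            logits = model(masked.unsqueeze(0)).logits[0, pos]
            total += torch.log_softmax(logits, dim=-1)[original_id].item()
    return total

def score_models(sentence, selected_models):
    scores = {}
    my_bar = st.progress(0)
    for index, model_id in enumerate(selected_models):
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForMaskedLM.from_pretrained(model_id)
        if model_id.startswith("castorini"):
            tokenizer.model_max_length = 512  # cap input length, as in the diff
        scores[model_id] = pseudo_log_likelihood(
            sentence, tokenizer, model, tokenizer.mask_token_id
        )
        # (index + 1) / n keeps the fraction in [0, 1] so the bar reaches 100%.
        my_bar.progress((index + 1) / len(selected_models))
    # Highest score first; the app's own sort_dictionary may order differently.
    return dict(sorted(scores.items(), key=lambda kv: kv[1], reverse=True))

Sorting in descending order assumes a higher pseudo-log-likelihood means a better fit for the input sentence's language; this mirrors the recommendation idea in the diff but is only a sketch of it.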