Spaces:
Sleeping
Sleeping
Matej Klemen
committed on
Commit
·
847b058
1
Parent(s):
b45ab07
Fix problem with fast tokenizer
Browse files- app.py +1 -1
- requirements.txt +1 -2
app.py
CHANGED
@@ -71,7 +71,7 @@ demo = gr.Interface(
|
|
71 |
|
72 |
if __name__ == "__main__":
|
73 |
model_name = "cjvt/SloBERTa-slo-word-spelling-annotator"
|
74 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
75 |
model = AutoModelForMaskedLM.from_pretrained(model_name)
|
76 |
mask_token = tokenizer.mask_token
|
77 |
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
|
|
|
71 |
|
72 |
if __name__ == "__main__":
|
73 |
model_name = "cjvt/SloBERTa-slo-word-spelling-annotator"
|
74 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
|
75 |
model = AutoModelForMaskedLM.from_pretrained(model_name)
|
76 |
mask_token = tokenizer.mask_token
|
77 |
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
|
requirements.txt
CHANGED
@@ -1,4 +1,3 @@
|
|
1 |
nltk
|
2 |
transformers
|
3 |
-
torch
|
4 |
-
tiktoken>=0.5.0
|
|
|
1 |
nltk
|
2 |
transformers
|
3 |
+
torch
|
|