fix : cuda -> cpu.
app.py
CHANGED
@@ -8,12 +8,12 @@ bw_modelcard = "amurienne/kellag-m2m100"
 fw_model = AutoModelForSeq2SeqLM.from_pretrained(fw_modelcard)
 fw_tokenizer = AutoTokenizer.from_pretrained(fw_modelcard)
 
-fw_translation_pipeline = pipeline("translation", model=fw_model, tokenizer=fw_tokenizer, src_lang='fr', tgt_lang='br', max_length=400, device="cuda")
+fw_translation_pipeline = pipeline("translation", model=fw_model, tokenizer=fw_tokenizer, src_lang='fr', tgt_lang='br', max_length=400, device="cpu")
 
 bw_model = AutoModelForSeq2SeqLM.from_pretrained(bw_modelcard)
 bw_tokenizer = AutoTokenizer.from_pretrained(bw_modelcard)
 
-bw_translation_pipeline = pipeline("translation", model=bw_model, tokenizer=bw_tokenizer, src_lang='br', tgt_lang='fr', max_length=400, device="cuda")
+bw_translation_pipeline = pipeline("translation", model=bw_model, tokenizer=bw_tokenizer, src_lang='br', tgt_lang='fr', max_length=400, device="cpu")
 
 # translation function
 def translate(text, direction):
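
For context, a minimal sketch of how the same pipelines could resolve the device at runtime instead of hard-coding "cpu". This is an illustration, not part of the commit; it assumes torch is importable (it is a dependency of transformers) and reuses the bw_modelcard shown in the hunk header.

import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline

# Assumption: use "cuda" only when a GPU is actually available, otherwise fall back to "cpu".
device = "cuda" if torch.cuda.is_available() else "cpu"

# br -> fr model card, as shown in the hunk header above.
bw_modelcard = "amurienne/kellag-m2m100"
bw_model = AutoModelForSeq2SeqLM.from_pretrained(bw_modelcard)
bw_tokenizer = AutoTokenizer.from_pretrained(bw_modelcard)

# Same pipeline call as in app.py, with the device resolved at runtime.
bw_translation_pipeline = pipeline(
    "translation",
    model=bw_model,
    tokenizer=bw_tokenizer,
    src_lang="br",
    tgt_lang="fr",
    max_length=400,
    device=device,
)

# Example usage: translate a short Breton sentence to French.
print(bw_translation_pipeline("Demat d'an holl !")[0]["translation_text"])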