Shenuki committed (verified)
Commit 8a24d37 · 1 Parent(s): 273a0b0

Create server.py

Files changed (1)
  1. server.py +31 -0
server.py ADDED
@@ -0,0 +1,31 @@
+ import os
+ import torch
+ from transformers import AutoProcessor, SeamlessM4TForTextToText
+ import gradio as gr
+
+ MODEL_NAME = "facebook/hf-seamless-m4t-medium"
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ processor = AutoProcessor.from_pretrained(MODEL_NAME)
+ model = SeamlessM4TForTextToText.from_pretrained(MODEL_NAME).to(device).eval()
+
+ def translate(text, sourceLang, targetLang, auto_detect):
+     src = None if auto_detect else sourceLang
+     inputs = processor(text=text, src_lang=src, return_tensors="pt").to(device)
+     tokens = model.generate(**inputs, tgt_lang=targetLang)
+     return processor.decode(tokens[0].tolist(), skip_special_tokens=True)
+
+ iface = gr.Interface(
+     fn=translate,
+     inputs=[
+         gr.Textbox(label="Text to translate"),
+         gr.Textbox(label="Source Language (e.g. eng)"),
+         gr.Textbox(label="Target Language (e.g. fra)"),
+         gr.Checkbox(label="Auto-detect source")
+     ],
+     outputs=gr.Textbox(label="Translated Text"),
+     title="iVoice Translate"
+ )
+
+ if __name__ == "__main__":
+     iface.launch(server_name="0.0.0.0", server_port=int(os.environ.get("PORT", 7860)))
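
Once this server is running, the Gradio interface can also be called programmatically. Below is a minimal client sketch (not part of the commit), assuming the app is reachable on localhost at the default port 7860 and exposes Gradio's default /predict endpoint for a single Interface; the example text and language codes are illustrative only.

# client_sketch.py -- hypothetical usage example, not included in this commit
from gradio_client import Client

# Connect to the locally running Space (assumes default port 7860 from server.py).
client = Client("http://localhost:7860")

# Arguments mirror the Interface inputs: text, source language, target language, auto-detect flag.
result = client.predict(
    "Hello, how are you?",  # Text to translate
    "eng",                  # Source Language (e.g. eng)
    "fra",                  # Target Language (e.g. fra)
    False,                  # Auto-detect source
    api_name="/predict",
)
print(result)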