import threading

import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

from backend.logger.logger import log_to_gsheet

# Fine-tuned MarianMT (opus-mt-pl-de) checkpoint for Polish -> German toponyms.
model_id = "DebasishDhal99/polish-to-german-toponym-model-opus-mt-pl-de"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Use the GPU when available; the model must be moved to the same device as
# the inputs, otherwise generation fails on CUDA machines.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def polish_to_german_translation(polish_place: str) -> str:
    """Translate a Polish place name into its German equivalent."""
    inputs = tokenizer(polish_place, return_tensors="pt", padding=True, truncation=True)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model.generate(**inputs, max_length=50)
    german_place = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Log asynchronously so a slow Google Sheets call never blocks the caller.
    threading.Thread(
        target=log_to_gsheet,
        args=(polish_place, german_place, "Polish", "German"),
        daemon=True,
    ).start()
    # log_to_gsheet(polish_place, german_place, "Polish", "German")

    return german_place
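

# A minimal usage sketch, not part of the original module: the entry point and
# the sample input below are assumptions for illustration. Calling the function
# returns the model's German rendering of the given Polish toponym; the exact
# output depends on the fine-tuned checkpoint's weights.
if __name__ == "__main__":
    print(polish_to_german_translation("Wrocław"))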