Spaces:
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -315,8 +315,8 @@ model_name = "gpt2-large"
|
|
315 |
model = GPT2LMHeadModel.from_pretrained(model_name)
|
316 |
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
|
317 |
|
318 |
-
device = torch.device("mps")
|
319 |
-
model.to(device)
|
320 |
model.eval()
|
321 |
|
322 |
|
@@ -399,8 +399,8 @@ def generate_text_with_probs(initial_context, context_text , top_p, max_length,
|
|
399 |
|
400 |
Tokens = {}
|
401 |
|
402 |
-
input_ids = tokenizer.encode(initial_context, return_tensors="pt").to(device='mps')
|
403 |
-
|
404 |
generated_text = initial_context
|
405 |
token_tables = []
|
406 |
|
|
|
315 |
model = GPT2LMHeadModel.from_pretrained(model_name)
|
316 |
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
|
317 |
|
318 |
+
#device = torch.device("mps")
|
319 |
+
#model.to(device)
|
320 |
model.eval()
|
321 |
|
322 |
|
|
|
399 |
|
400 |
Tokens = {}
|
401 |
|
402 |
+
#input_ids = tokenizer.encode(initial_context, return_tensors="pt").to(device='mps')
|
403 |
+
input_ids = tokenizer.encode(initial_context, return_tensors="pt").to(device='cpu')
|
404 |
generated_text = initial_context
|
405 |
token_tables = []
|
406 |
|