billusanda007 committed on
Commit
e41f99a
·
verified ·
1 Parent(s): d336738

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -537,10 +537,11 @@ def generate_text_with_probs(initial_context, context_text , top_p, max_length,
537
 
538
  ##print(generated_text)
539
  initial_context = generated_text
540
- input_ids = tokenizer.encode(generated_text, return_tensors="pt").to(device='mps')
 
541
 
542
- use_ngram += 1
543
 
 
544
 
545
  else:
546
 
@@ -576,7 +577,7 @@ def generate_text_with_probs(initial_context, context_text , top_p, max_length,
576
 
577
  generated_text = tokenizer.decode(input_ids[0], skip_special_tokens=True)
578
 
579
- total = use_llm + use_llm_back_up + use_ngram
580
 
581
  ##print(f"total: {use_llm} ({(use_llm / total) * 100:.2f}%)")
582
  ##print(f"use_llms: {use_llm_back_up} ({(use_llm_back_up / total) * 100:.2f}%)")
 
537
 
538
  ##print(generated_text)
539
  initial_context = generated_text
540
+ #input_ids = tokenizer.encode(generated_text, return_tensors="pt").to(device='mps')
541
+ input_ids = tokenizer.encode(generated_text, return_tensors="pt").to(device='cpu')
542
 
 
543
 
544
+ use_ngram += 1
545
 
546
  else:
547
 
 
577
 
578
  generated_text = tokenizer.decode(input_ids[0], skip_special_tokens=True)
579
 
580
+ #total = use_llm + use_llm_back_up + use_ngram
581
 
582
  ##print(f"total: {use_llm} ({(use_llm / total) * 100:.2f}%)")
583
  ##print(f"use_llms: {use_llm_back_up} ({(use_llm_back_up / total) * 100:.2f}%)")