Tim committed on
Commit feee91b · 1 Parent(s): b9aeae5

feat: add tab

Files changed (1): app.py +4 -4
app.py CHANGED

@@ -3,6 +3,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 from peft import PeftConfig, PeftModel
 import torch
 from transformers import BitsAndBytesConfig
+
 # models
 base_model_name = "mistralai/Mistral-7B-Instruct-v0.2"
 adapter_model_name = "TymofiiNas/results"
@@ -23,7 +24,7 @@ tokenizer = AutoTokenizer.from_pretrained(base_model_name)
 
 
 def generate_response(text):
-    text = "<s> [INST]"+text+ "[/INST]"
+    text = "<s> [INST]" + text + "[/INST]"
     encoded_input = tokenizer(text, return_tensors="pt", add_special_tokens=False)
     model_inputs = encoded_input.to("cuda")
 
@@ -36,7 +37,7 @@ def generate_response(text):
 
     decoded_output = tokenizer.batch_decode(generated_ids)
 
-    return decoded_output[0][len(text):]
+    return decoded_output[0][len(text) :]
 
 
 demo = gr.Interface(
@@ -44,5 +45,4 @@ demo = gr.Interface(
     inputs="text",
     outputs="text",
 )
-
-demo.launch()
+gr.TabbedInterface([demo]).queue().launch()
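For readers skimming the diff: `generate_response` wraps the user text in Mistral-Instruct's `<s> [INST] ... [/INST]` markers before tokenizing, then strips the prompt back off the decoded output by character count. A minimal standalone sketch of that pattern follows; the prompt and reply strings are illustrative stand-ins, not taken from the app, and the slice only works on the assumption (which the app relies on) that the tokenizer round-trips the prompt text verbatim at the start of the decoded output:

```python
# Sketch of the prompt round-trip in generate_response.
prompt = "<s> [INST]" + "What is PEFT?" + "[/INST]"

# Stand-in for tokenizer.batch_decode(generated_ids)[0]; a real run
# yields the prompt followed by the model's continuation.
decoded = prompt + " PEFT is parameter-efficient fine-tuning.</s>"

# Assuming decoded begins with the prompt verbatim, slicing off
# len(prompt) characters leaves only the model's reply.
reply = decoded[len(prompt):]
print(reply)  # " PEFT is parameter-efficient fine-tuning.</s>"
```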
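The launch change itself swaps `demo.launch()` for `gr.TabbedInterface([demo]).queue().launch()`, which renders each Interface in the list as its own tab and enables request queueing. A minimal sketch of the same pattern with a second tab; the `tab_names` argument is standard Gradio, but the second function and both tab titles are hypothetical illustrations, not part of this commit:

```python
import gradio as gr

def generate_response(text):
    # Stand-in for the app's model call so the sketch runs without a GPU.
    return text.upper()

def count_words(text):
    # Hypothetical second feature, used only to show a second tab.
    return str(len(text.split()))

chat_demo = gr.Interface(fn=generate_response, inputs="text", outputs="text")
count_demo = gr.Interface(fn=count_words, inputs="text", outputs="text")

# TabbedInterface renders one tab per Interface; queue() serializes
# incoming requests, which matters when each call occupies a single GPU.
gr.TabbedInterface(
    [chat_demo, count_demo],
    tab_names=["Generate", "Word count"],
).queue().launch()
```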