gxy committed on
Commit
0fdef0d
·
1 Parent(s): e58a035
Files changed (1) hide show
  1. app.py +3 -4
app.py CHANGED
@@ -13,7 +13,7 @@ model = BlipForConditionalGeneration.from_pretrained(
13
  _MODEL_PATH, use_auth_token=HF_TOKEN).half().eval().to(device)
14
 
15
 
16
- def inference(raw_image, model_n, question, strategy):
17
  if model_n == 'Image Captioning':
18
  input = processor(raw_image).to(device, torch.float16)
19
  with torch.no_grad():
@@ -38,8 +38,7 @@ def inference(raw_image, model_n, question, strategy):
38
  return 'caption: '+caption
39
 
40
 
41
- inputs = [gr.inputs.Image(type='pil'), gr.inputs.Radio(choices=['Image Captioning'], type="value", default="Image Captioning", label="Task"), gr.inputs.Textbox(
42
- lines=2, label="Question"), gr.inputs.Radio(choices=['Beam search', 'Nucleus sampling'], type="value", default="Nucleus sampling", label="Caption Decoding Strategy")]
43
  outputs = gr.outputs.Textbox(label="Output")
44
 
45
  title = "BLIP"
@@ -50,4 +49,4 @@ article = "<p style='text-align: center'><a href='https://github.com/IDEA-CCNL/F
50
 
51
 
52
  gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=[
53
- ['demo.jpg', "Image Captioning", "None", "Nucleus sampling"]]).launch(enable_queue=True)
 
13
  _MODEL_PATH, use_auth_token=HF_TOKEN).half().eval().to(device)
14
 
15
 
16
+ def inference(raw_image, model_n, strategy):
17
  if model_n == 'Image Captioning':
18
  input = processor(raw_image).to(device, torch.float16)
19
  with torch.no_grad():
 
38
  return 'caption: '+caption
39
 
40
 
41
+ inputs = [gr.inputs.Image(type='pil'), gr.inputs.Radio(choices=['Image Captioning'], type="value", default="Image Captioning", label="Task"), gr.inputs.Radio(choices=['Beam search', 'Nucleus sampling'], type="value", default="Nucleus sampling", label="Caption Decoding Strategy")]
 
42
  outputs = gr.outputs.Textbox(label="Output")
43
 
44
  title = "BLIP"
 
49
 
50
 
51
  gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=[
52
+ ['demo.jpg', "Image Captioning", "Nucleus sampling"]]).launch(enable_queue=True)