bugfix
app.py CHANGED
```diff
@@ -15,7 +15,7 @@ model = BlipForConditionalGeneration.from_pretrained(
 
 def inference(raw_image, model_n, strategy):
     if model_n == 'Image Captioning':
-        input = processor(raw_image).to(device, torch.float16)
+        input = processor(raw_image, return_tensors="pt").to(device, torch.float16)
         with torch.no_grad():
             if strategy == "Beam search":
                 config = GenerationConfig(
```
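The substantive fix in this hunk is the added `return_tensors="pt"`. Without it, the BLIP processor returns plain arrays rather than PyTorch tensors, so the chained `.to(device, torch.float16)` call fails at runtime, which matches the Space's "Runtime error" status this commit addresses. Below is a minimal sketch of the corrected captioning path; the `Salesforce/blip-image-captioning-base` checkpoint name and the CPU dtype fallback are assumptions, not part of the diff:

```python
# Minimal sketch of the fixed preprocessing step. The checkpoint name and the
# CPU float32 fallback are assumptions -- the diff only shows the processor call.
import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32  # fp16 is unreliable on CPU

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained(
    "Salesforce/blip-image-captioning-base", torch_dtype=dtype
).to(device)

raw_image = Image.open("example.jpg").convert("RGB")  # hypothetical input image

# return_tensors="pt" makes the processor emit torch tensors, which support
# .to(device, dtype); without it the .to() call raises -- the bug being fixed.
inputs = processor(raw_image, return_tensors="pt").to(device, dtype)

with torch.no_grad():
    out = model.generate(**inputs)
print('caption: ' + processor.decode(out[0], skip_special_tokens=True))
```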
```diff
@@ -38,7 +38,8 @@ def inference(raw_image, model_n, strategy):
     return 'caption: '+caption
 
 
-inputs = [gr.inputs.Image(type='pil'), gr.inputs.Radio(choices=['Image Captioning'], type="value", default="Image Captioning", label="Task"), gr.inputs.Radio(
+inputs = [gr.inputs.Image(type='pil'), gr.inputs.Radio(choices=['Image Captioning'], type="value", default="Image Captioning", label="Task"), gr.inputs.Radio(
+    choices=['Beam search', 'Nucleus sampling'], type="value", default="Nucleus sampling", label="Caption Decoding Strategy")]
 outputs = gr.outputs.Textbox(label="Output")
 
 title = "BLIP"
```
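The added continuation line completes the dangling `gr.inputs.Radio(` call with the decoding-strategy choices, so the `inputs` list is now well-formed. For context, a hypothetical sketch of how these pieces are commonly wired together with the legacy `gr.inputs`/`gr.outputs` API the app uses; the `gr.Interface` call itself is assumed and does not appear in the diff:

```python
# Hypothetical wiring of the components above (not shown in the diff).
import gradio as gr

demo = gr.Interface(
    fn=inference,      # captioning function from the first hunk
    inputs=inputs,     # Image + two Radio components defined above
    outputs=outputs,   # gr.outputs.Textbox(label="Output")
    title=title,       # "BLIP"
)
demo.launch()
```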