prithivMLmods committed
Commit 7ea5e47 · verified · 1 Parent(s): 217d753

Update app.py

Files changed (1)
  1. app.py +41 -31
app.py CHANGED
@@ -1,32 +1,42 @@
- import gradio as gr
- import requests
- from PIL import Image
- from transformers import BlipProcessor, BlipForConditionalGeneration
- import time
-
- processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
- model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
-
- def caption(img, min_len, max_len):
-     raw_image = Image.open(img).convert('RGB')
-
-     inputs = processor(raw_image, return_tensors="pt")
-
-     out = model.generate(**inputs, min_length=min_len, max_length=max_len)
-     return processor.decode(out[0], skip_special_tokens=True)
-
- def greet(img, min_len, max_len):
-     start = time.time()
-     result = caption(img, min_len, max_len)
-     end = time.time()
-     total_time = str(end - start)
-     result = result + '\n' + total_time + ' seconds'
-     return result
-
- iface = gr.Interface(fn=greet,
-                      title='',
-                      description=" ",
-                      inputs=[gr.Image(type='filepath', label='Image'), gr.Slider(label='Minimum Length', minimum=1, maximum=1000, value=30), gr.Slider(label='Maximum Length', minimum=1, maximum=1000, value=100)],
-                      outputs=gr.Textbox(label='Caption'),
-                      theme = gr.themes.Base(primary_hue="teal",secondary_hue="teal",neutral_hue="slate"),)
+ import gradio as gr
+ import requests
+ from PIL import Image
+ from transformers import BlipProcessor, BlipForConditionalGeneration
+ import time
+
+ processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
+ model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
+
+ def caption(img, min_len, max_len):
+     raw_image = Image.open(img).convert('RGB')
+
+     inputs = processor(raw_image, return_tensors="pt")
+
+     out = model.generate(**inputs, min_length=min_len, max_length=max_len)
+     return processor.decode(out[0], skip_special_tokens=True)
+
+ def greet(img, min_len, max_len):
+     start = time.time()
+     result = caption(img, min_len, max_len)
+     end = time.time()
+     total_time = str(end - start)
+     result = result + '\n' + total_time + ' seconds'
+     return result
+
+ iface = gr.Interface(fn=greet,
+                      title='Image Captioning with BLIP',
+                      description="Generate captions for images using the BLIP model.",
+                      inputs=[gr.Image(type='filepath', label='Image'),
+                              gr.Slider(label='Minimum Length', minimum=1, maximum=1000, value=30),
+                              gr.Slider(label='Maximum Length', minimum=1, maximum=1000, value=100)],
+                      outputs=gr.Textbox(label='Caption'),
+                      theme = gr.themes.Base(primary_hue="teal",secondary_hue="teal",neutral_hue="slate"))
+
+ # Add examples
+ iface.examples = [
+     ["images/1.png", 30, 100],
+     ["images/2.png", 30, 100],
+     ["images/3.png", 30, 100]
+ ]
+
  iface.launch()
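
One caveat on the new example wiring: assigning iface.examples after the Interface has already been constructed bypasses Gradio's setup logic, so the examples may not appear in the rendered UI. gr.Interface accepts an examples parameter directly, which is the documented way to attach them. A minimal sketch of that variant, assuming the same images/1.png through images/3.png files exist in the Space:

# Sketch: pass the example rows to the constructor so Gradio renders them.
# The paths are the same ones used in the commit and are assumed to exist.
iface = gr.Interface(fn=greet,
                     title='Image Captioning with BLIP',
                     description="Generate captions for images using the BLIP model.",
                     inputs=[gr.Image(type='filepath', label='Image'),
                             gr.Slider(label='Minimum Length', minimum=1, maximum=1000, value=30),
                             gr.Slider(label='Maximum Length', minimum=1, maximum=1000, value=100)],
                     outputs=gr.Textbox(label='Caption'),
                     theme=gr.themes.Base(primary_hue="teal", secondary_hue="teal", neutral_hue="slate"),
                     examples=[["images/1.png", 30, 100],
                               ["images/2.png", 30, 100],
                               ["images/3.png", 30, 100]])

iface.launch()

Each example row matches the three inputs in order (image path, minimum length, maximum length), so clicking an example pre-fills the whole form.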