import pathlib

import gradio as gr
import open_clip
import torch

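# Run inference on the GPU when available, otherwise fall back to CPU.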
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the CoCa ViT-L/14 captioning model (LAION-2B pretrained, fine-tuned on
# MSCOCO) together with its image preprocessing transform.
model, _, transform = open_clip.create_model_and_transforms(
    "coca_ViT-L-14",
    pretrained="mscoco_finetuned_laion2B-s13B-b90k",
)
model.to(device)
model.eval()


def output_generate(image):
    # Preprocess the PIL image and add a batch dimension.
    im = transform(image).unsqueeze(0).to(device)
    # Generate caption tokens; enable mixed precision only when running on CUDA.
    with torch.no_grad(), torch.cuda.amp.autocast(enabled=device.type == "cuda"):
        generated = model.generate(im, seq_len=20)
    # Decode the token ids and strip the special start/end-of-text tokens.
    caption = open_clip.decode(generated[0].detach())
    return caption.split("<end_of_text>")[0].replace("<start_of_text>", "")


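# Example images shipped with the app, used to populate the examples gallery.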
paths = sorted(pathlib.Path("images").glob("*.jpg"))

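# Wire the caption function into a simple Gradio interface and launch the demo.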
iface = gr.Interface(
    fn=output_generate,
    inputs=gr.Image(label="Input image", type="pil"),
    outputs=gr.Text(label="Caption output"),
    title="CoCa: Contrastive Captioners are Image-Text Foundation Models",
    examples=[path.as_posix() for path in paths],
)
iface.launch()