darshanmakwana committed
Commit 7389251 · verified · 1 Parent(s): 69b8f32

Upload infer.py

Files changed (1): infer.py (+47, -0)
infer.py ADDED
@@ -0,0 +1,47 @@
from transformers import GPT2LMHeadModel, AutoTokenizer
import demo_util
import numpy as np
import torch
from PIL import Image

torch.backends.cuda.matmul.allow_tf32 = True
torch.manual_seed(0)

device = "cuda:1"
dtype = torch.float16

# Load the TiTok image tokenizer and the fine-tuned GPT-2 checkpoint
config = demo_util.get_config("configs/titok_l32.yaml")
titok_tokenizer = demo_util.get_titok_tokenizer(config)
titok_tokenizer = titok_tokenizer.to(device)
tokenizer = AutoTokenizer.from_pretrained("./image_tokenizer")
model = GPT2LMHeadModel.from_pretrained("./checkpoint-20000").to(device).to(dtype).eval()

def detokenize(tokens):
    # Decode a sequence of TiTok codebook indices back into a PIL image
    encoded_tokens = torch.from_numpy(np.array(tokens).astype(np.int64)).view(1, 1, -1).to(device)
    reconstructed_image = titok_tokenizer.decode_tokens(encoded_tokens)
    reconstructed_image = torch.clamp(reconstructed_image, 0.0, 1.0)
    reconstructed_image = (reconstructed_image * 255.0).permute(0, 2, 3, 1).to("cpu", dtype=torch.uint8).numpy()[0]
    return Image.fromarray(reconstructed_image)

prompt = ""

inputs = tokenizer(f"{prompt}<|startofimage|>", return_tensors="pt").to(device)
input_ids = inputs["input_ids"]
init = input_ids.shape[-1]
# Id of the first image token; subtracting it maps vocabulary ids to codebook indices
soi_token = tokenizer.encode("<|image:0|>")[0]

# Greedy decoding: 32 TiTok image tokens plus a terminating token
with torch.no_grad():
    for _ in range(33):
        logits = model(input_ids).logits[:, -1, :]
        probas = torch.nn.functional.softmax(logits, dim=-1)
        pred_idx = torch.argmax(probas, dim=-1, keepdim=True)
        input_ids = torch.cat((input_ids, pred_idx), dim=-1)

# Drop the prompt and the final terminator, then shift into codebook index space
tokens = input_ids[:, init:-1].cpu().squeeze().numpy() - soi_token

if np.any(tokens < 0) or np.any(tokens >= 4096):
    print("Illegal Image Tokens")
else:
    img = detokenize(tokens)
    img.save("./out.png")
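The loop above re-runs the full forward pass over the whole sequence at every step. If the checkpoint's vocabulary also includes an end-of-image token (infer.py only shows <|startofimage|> and <|image:0|>, so the name <|endofimage|> below is an assumption), the same greedy decoding can be expressed with Hugging Face's generate(), which reuses cached key/values between steps; a minimal sketch:

# Hypothetical alternative to the manual loop; "<|endofimage|>" is an assumed token name
eoi_token = tokenizer.convert_tokens_to_ids("<|endofimage|>")
with torch.no_grad():
    output_ids = model.generate(
        inputs["input_ids"],
        max_new_tokens=33,   # 32 image tokens + the terminator
        do_sample=False,     # greedy, matching the argmax loop above
        eos_token_id=eoi_token,
        pad_token_id=eoi_token,
    )
tokens = output_ids[:, init:-1].cpu().squeeze().numpy() - soi_token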
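As a sanity check of the image tokenizer itself, the opposite direction encodes a picture into the same codebook indices and feeds them back through detokenize. The sketch below follows the TiTok demo code; the encode() call returning quantizer results under "min_encoding_indices" and the 256x256 input size are assumptions taken from that demo, and example.png is a placeholder path:

# Round-trip sketch; the encode() signature follows the TiTok demo and is an assumption here
image = Image.open("example.png").convert("RGB").resize((256, 256))  # placeholder input
pixels = torch.from_numpy(np.array(image).astype(np.float32)).permute(2, 0, 1).unsqueeze(0) / 255.0
encoded = titok_tokenizer.encode(pixels.to(device))[1]["min_encoding_indices"]  # 32 codebook indices
detokenize(encoded.view(-1).cpu().numpy()).save("round_trip.png")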