Nguyencent committed on
Commit 0e22d26 · verified · 1 Parent(s): f85c70c

Upload imagebind_eval_altered.py

vinoground_subset/imagebind_eval_altered.py ADDED
@@ -0,0 +1,68 @@
+ from imagebind import data
+ import torch
+ from imagebind.models import imagebind_model
+ from imagebind.models.imagebind_model import ModalityType
+ import pandas as pd
+ import os
+ import argparse
+ from tqdm import tqdm
+
+ # Parse command-line arguments
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--data', type=str, default="./Vinoground", help='Path to the Vinoground dataset (from Hugging Face)')
+ args = parser.parse_args()
+
+ data_path = args.data
+
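+ # Example invocation (a sketch; assumes the Vinoground CSV and videos were
+ # downloaded from Hugging Face into ./Vinoground):
+ #   python imagebind_eval_altered.py --data ./Vinoground
+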
+ vino = pd.read_csv(os.path.join(data_path, "vinoground_hardest.csv"))
+
+ num_examples = len(vino.index)  # the original dataset has 500 examples; this should be 215 after filtering
+
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+ # Instantiate the pretrained ImageBind (huge) model
+ model = imagebind_model.imagebind_huge(pretrained=True)
+ model.eval()
+ model.to(device)
+
+ text_correct = 0
+ video_correct = 0
+ group_correct = 0
+
+ for row_num in tqdm(range(num_examples)):
+     # Filtering changes the row numbers, but the "index" column preserves each
+     # example's index in the original dataset, where the row number corresponded
+     # to the video number; use it to select the matching pair of videos.
+     video_num = vino["index"][row_num]
+     videos = [
+         os.path.join(data_path, f"vinoground_videos/{video_num}_pos.mp4"),
+         os.path.join(data_path, f"vinoground_videos/{video_num}_neg.mp4"),
+     ]
+     texts = [vino["pos_cap"][row_num], vino["neg_cap"][row_num]]
+
+     inputs = {
+         ModalityType.TEXT: data.load_and_transform_text(texts, device),
+         ModalityType.VISION: data.load_and_transform_video_data(videos, device),
+     }
+
+     with torch.no_grad():
+         embeddings = model(inputs)
+
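+     # model(inputs) returns a dict of embeddings keyed by modality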
+     # 2x2 similarity matrix: rows index the (pos, neg) videos, columns the (pos, neg) captions
+     results = embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T
+
+     # Winoground-style scoring: "video" is correct when each caption retrieves its
+     # own video, "text" when each video retrieves its own caption, "group" when both hold
+     video_correct += bool(results[0][0] > results[1][0] and results[1][1] > results[0][1])
+     text_correct += bool(results[0][0] > results[0][1] and results[1][1] > results[1][0])
+     group_correct += bool(
+         results[0][0] > results[1][0] and results[1][1] > results[0][1]
+         and results[0][0] > results[0][1] and results[1][1] > results[1][0]
+     )
+
+ print(text_correct / num_examples, video_correct / num_examples, group_correct / num_examples)
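A quick sanity check of the scoring logic above (a sketch, not part of the uploaded file; the similarity values are made up):

    import torch

    # Toy 2x2 similarity matrix: rows are the (pos, neg) videos, columns the (pos, neg) captions
    results = torch.tensor([[0.9, 0.2],
                            [0.1, 0.8]])
    video_ok = bool(results[0][0] > results[1][0] and results[1][1] > results[0][1])
    text_ok = bool(results[0][0] > results[0][1] and results[1][1] > results[1][0])
    group_ok = video_ok and text_ok
    assert video_ok and text_ok and group_ok  # a matrix this well separated passes all three scores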