nreimers commited on
Commit
3b2f6c9
·
1 Parent(s): 2e6b7dc
Files changed (4) hide show
  1. ivf/centroid_index.bin +3 -0
  2. ivf/centroid_vecs.npy +3 -0
  3. ivf/config.json +1 -0
  4. search.py +132 -0
ivf/centroid_index.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b92fa7c68c49ee38272d1b7c55062991329bae0a72433f1b084cf023918a12c
3
+ size 1168203954
ivf/centroid_vecs.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b2db546fe1f71ed76004d4aa20ff432c64ad3e3c2a52aefb0b86a19582cc266e
3
+ size 128020480
ivf/config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"zfill": 9, "folder": 3, "model": "embed-english-v3.0"}
search.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from argparse import ArgumentParser
2
+ import json
3
+ import numpy as np
4
+ import os
5
+ import faiss
6
+ import time
7
+ from multiprocessing.pool import Pool
8
+ import cohere
9
+ import zstandard
10
+
11
# Cohere API client for query embedding; requires COHERE_API_KEY in the
# environment (raises KeyError at import time if unset — fail fast).
cohere_key = os.environ["COHERE_API_KEY"]
co = cohere.Client(cohere_key)

# Cap faiss OpenMP threads; cluster-level searches are parallelized via the
# process pool below, so per-index threading is kept small.
faiss.omp_set_num_threads(4)
15
+
16
def get_bin_embedding(query, model):
    """Embed *query* with the Cohere API and derive a sign-binarized embedding.

    Returns a tuple ``(emb_float, emb_packed)`` where ``emb_float`` is the raw
    float embedding (shape ``(1, dim)``) and ``emb_packed`` is its sign pattern
    (1 where the component is positive) packed to uint8 via ``np.packbits``.
    """
    response = co.embed(texts=[query], model=model, input_type="search_query")
    emb_float = np.asarray(response.embeddings)
    # Sign binarization: boolean mask feeds packbits directly (True -> bit 1).
    emb_packed = np.packbits(emb_float > 0, axis=-1)
    return emb_float, emb_packed
22
+
23
+
24
+
25
def search_cluster(args):
    """Pool worker: exhaustive Hamming search over a single IVF cluster.

    ``args`` is a dict with keys ``input_path``, ``cid_folder``, ``cid``,
    ``query_emb_bin`` (packed uint8 query, shape (1, bytes)) and ``topk``.
    Returns a list of hit dicts: cid, doc_idx (row inside the cluster),
    doc_score (Hamming distance, lower is better) and the packed doc_emb row
    so the parent can rescore without reloading the cluster file.
    """
    # 1) Load the cluster's packed binary embeddings and index them.
    with open(os.path.join(args['input_path'], f"emb/{args['cid_folder']}/{args['cid']}.npy"), "rb") as fIn:
        cluster_emb = np.load(fIn)
    # IndexBinaryFlat takes the dimension in bits; the .npy rows are bytes.
    cluster_index = faiss.IndexBinaryFlat(cluster_emb.shape[1] * 8)
    cluster_index.add(cluster_emb)

    # 2) Exhaustive search for the topk nearest rows (by Hamming distance).
    cluster_scores, cluster_doc_ids = cluster_index.search(args['query_emb_bin'], args['topk'])

    # faiss pads the id matrix with -1 when the cluster holds fewer than topk
    # vectors; without this filter, cluster_emb[-1] would silently return the
    # LAST row and inject a bogus hit into the candidate list.
    return [
        {'cid': args['cid'], 'doc_idx': doc_idx, 'doc_score': doc_score, 'doc_emb': cluster_emb[doc_idx]}
        for doc_score, doc_idx in zip(cluster_scores[0], cluster_doc_ids[0])
        if doc_idx >= 0
    ]
36
+
37
def search(args, centroids, query):
    """Run one IVF query end-to-end and print the top-3 results.

    Pipeline: embed query -> find nprobe nearest centroids -> fan the binary
    search over those clusters via the module-global worker `pool` -> rescore
    the best topk*5 candidates with the float query embedding -> fetch the
    winning documents' JSON from the compressed text shards.

    args: argparse namespace; must carry .model, .input, .nprobe, .topk and
          .ivf (the parsed ivf/config.json, attached by main()).
    centroids: faiss.IndexBinaryFlat over all centroid binary vectors.
    query: the raw query string.
    """
    # Rescore a 5x-oversampled candidate pool before the final topk cut.
    num_rescore = args.topk*5

    #Query encoding
    start_time = time.time()
    query_emb, query_emb_bin = get_bin_embedding(query, args.model)
    print(f"Query encoding took {(time.time()-start_time)*1000:.2f}ms")

    start_time = time.time()
    #Search nprobe closest centroids
    centroid_scores, centroid_ids = centroids.search(query_emb_bin, args.nprobe)
    centroid_ids = centroid_ids[0]
    print(f"Centroid search took {(time.time()-start_time)*1000:.2f}ms")

    start_time = time.time()
    all_hits = []

    # Build one task dict per selected cluster. Cluster files are sharded on
    # disk by the LAST config['folder'] digits of the zero-padded centroid id.
    pool_args = []
    for cid in centroid_ids:
        cid_str = str(cid.item()).zfill(args.ivf['zfill'])
        cid_folder = cid_str[-args.ivf['folder']:]
        pool_args.append({'cid': cid_str, 'cid_folder': cid_folder, 'input_path': args.input, 'topk': args.topk, 'query_emb_bin': query_emb_bin})

    # NOTE(review): relies on the module-global `pool` created under
    # __main__ — confirm this module is only used as a script.
    for result in pool.imap_unordered(search_cluster, pool_args, chunksize=10):
        all_hits.extend(result)

    #Sort global scores (Hamming distance: ascending = best first)
    all_hits.sort(key=lambda x: x['doc_score'])
    all_hits = all_hits[0:num_rescore]

    print(f"Searching in clusters took {(time.time()-start_time)*1000:.2f}ms"); start_time = time.time()

    #Dense - Binary Rescoring: unpack each candidate's bits to a +/-1 vector
    #and score it against the FLOAT query embedding (dot product).
    for hit in all_hits:
        doc_emb = hit['doc_emb']
        doc_emb_bin_unpacked = np.unpackbits(doc_emb, axis=-1).astype("int")
        doc_emb_bin_unpacked = 2*doc_emb_bin_unpacked-1
        hit['cont_score'] = (query_emb @ doc_emb_bin_unpacked.T).item()

    # Dot-product similarity: descending = best first.
    all_hits.sort(key=lambda x: x['cont_score'], reverse=True)
    all_hits = all_hits[0:args.topk]
    print(f"Dense-Binary rescoring took {(time.time()-start_time)*1000:.2f}ms"); start_time = time.time()


    #Fetch documents: doc_idx is the row number within the cluster, which
    #matches the line number in the cluster's .jsonl.zst text shard.
    results = []

    for hit in all_hits:
        with zstandard.open(os.path.join(args.input, f"text/{hit['cid'][-args.ivf['folder']:]}/{hit['cid']}.jsonl.zst"), "rt") as fIn:
            for line_idx, line in enumerate(fIn):
                if line_idx == hit['doc_idx']:
                    data = json.loads(line)
                    data['_score'] = hit['cont_score']
                    results.append(data)
                    break

    print(f"Fetch docs took {(time.time()-start_time)*1000:.2f}ms")

    for hit in results[0:3]:
        print(hit)
        print("-------------")
101
+
102
+
103
def main():
    """Parse CLI args, load the IVF index from --input, and serve an
    interactive query loop (one search() call per entered query)."""
    parser = ArgumentParser()
    parser.add_argument("--model", default="embed-english-v3.0")
    parser.add_argument("--input", required=True, help="IVF Folder")
    parser.add_argument("--nprobe", type=int, default=100)
    parser.add_argument("--topk", type=int, default=10)
    args = parser.parse_args()

    #Load config (zfill / folder sharding scheme + model name) and attach it
    #to the args namespace so search() gets it as args.ivf.
    with open(f"{args.input}/config.json") as fIn:
        args.ivf = json.load(fIn)


    #Restore centroid index from the packed binary centroid vectors;
    #IndexBinaryFlat takes the dimension in bits (bytes * 8).
    with open(os.path.join(args.input, "centroid_vecs.npy"), "rb") as fIn:
        centroid_vec = np.load(fIn)
    print("Centroids shape:", centroid_vec.shape)
    centroids = faiss.IndexBinaryFlat(centroid_vec.shape[1]*8)
    centroids.add(centroid_vec)


    # Interactive loop; exit with Ctrl-C / EOF.
    while True:
        query = input("Query: ")
        search(args, centroids, query)
        print("\n===========================\n")
128
+
129
+
130
if __name__ == "__main__":
    # The pool must be a module-level global named `pool`: search() references
    # it directly, and creating it under the __main__ guard keeps fork-based
    # workers from re-spawning pools on import.
    pool = Pool(processes=8)
    try:
        main()
    finally:
        # Previously the pool was never shut down; release the worker
        # processes even when main() exits via Ctrl-C/EOF.
        pool.close()
        pool.join()