nreimers commited on
Commit
43238c0
·
1 Parent(s): 9470c8a
Files changed (1) hide show
  1. search.py +139 -0
search.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Interactive IVF binary-embedding search over a Cohere-embedded corpus.
# Layout on disk (under --input): config.json, centroid_vecs.npy,
# emb/<folder>/<cid>.npy (packed binary doc embeddings) and
# text/<folder>/<cid>.jsonl.zst (the documents themselves).

# stdlib
from argparse import ArgumentParser
from itertools import repeat
from multiprocessing.pool import ThreadPool, Pool
import gzip
import json
import os
import time

# third-party
import cohere
import faiss
import numpy as np
import torch  # fixed: was imported twice in the original prologue
import zstandard
from sentence_transformers import SentenceTransformer

# Cohere client for query embedding; raises KeyError at import time if the
# API key is missing, which fails fast rather than at first query.
cohere_key = os.environ["COHERE_API_KEY"]
co = cohere.Client(cohere_key)

# Cap FAISS's internal OpenMP parallelism so it doesn't oversubscribe cores
# alongside the multiprocessing Pool used for per-cluster search.
faiss.omp_set_num_threads(4)
21
def get_bin_embedding(query, model):
    """Embed *query* via the Cohere API; return (float embedding, packed binary embedding)."""
    # Dense float embedding from Cohere (2-D: one row for the single query).
    emb = np.asarray(co.embed(texts=[query], model=model, input_type="search_query").embeddings)
    # Sign-binarize: strictly positive components -> 1, everything else -> 0,
    # then pack 8 bits per byte — the layout FAISS binary indexes expect.
    bits = (emb > 0).astype(np.int8)
    packed = np.packbits(bits, axis=-1)
    return emb, packed
27
+
28
+
29
def search_cluster(args):
    """Search one IVF cluster; return its top-k hits as dicts (runs in a pool worker).

    *args* is a plain dict (picklable for multiprocessing) with keys:
    input_path, cid, cid_folder, topk, query_emb_bin.
    """
    # 1) Load the cluster's packed binary embeddings and build a flat binary index.
    emb_path = os.path.join(args['input_path'], f"emb/{args['cid_folder']}/{args['cid']}.npy")
    with open(emb_path, "rb") as fIn:
        cluster_emb = np.load(fIn)
    index = faiss.IndexBinaryFlat(cluster_emb.shape[1] * 8)
    index.add(cluster_emb)

    # 2) Hamming-distance search for the single query row.
    scores, doc_ids = index.search(args['query_emb_bin'], args['topk'])

    hits = []
    for doc_score, doc_idx in zip(scores[0], doc_ids[0]):
        hits.append({'cid': args['cid'], 'doc_idx': doc_idx, 'doc_score': doc_score, 'doc_emb': cluster_emb[doc_idx]})
    return hits
40
+
41
def search(args, centroids, query):
    """Run one query end-to-end: embed, probe clusters, rescore, fetch and print docs.

    *centroids* is a faiss.IndexBinaryFlat over the cluster centroid vectors;
    *args* carries the CLI options plus the IVF layout config in args.ivf.
    Relies on the module-level worker `pool` for per-cluster fan-out.
    """
    num_rescore = args.topk * 5  # rescoring pool is 5x the final result count

    # --- Query encoding ---
    start_time = time.time()
    query_emb, query_emb_bin = get_bin_embedding(query, args.model)
    print(f"Query encoding took {(time.time()-start_time)*1000:.2f}ms")

    # --- Probe the nprobe closest centroids (binary / Hamming search) ---
    start_time = time.time()
    centroid_scores, centroid_ids = centroids.search(query_emb_bin, args.nprobe)
    centroid_ids = centroid_ids[0]
    print(f"Centroid search took {(time.time()-start_time)*1000:.2f}ms")

    # --- Search inside each probed cluster, fanned out over the process pool ---
    start_time = time.time()
    pool_args = []
    for cid in centroid_ids:
        # Cluster id -> zero-padded filename; last folder_len digits pick the subfolder.
        cid_str = str(cid.item()).zfill(args.ivf['file_len'])
        cid_folder = cid_str[-args.ivf['folder_len']:]
        pool_args.append({'cid': cid_str, 'cid_folder': cid_folder, 'input_path': args.input, 'topk': args.topk, 'query_emb_bin': query_emb_bin})

    all_hits = []
    for result in pool.imap_unordered(search_cluster, pool_args, chunksize=10):
        all_hits.extend(result)

    # Lower Hamming distance = better match; keep the best num_rescore candidates.
    all_hits.sort(key=lambda x: x['doc_score'])
    all_hits = all_hits[0:num_rescore]

    print(f"Searching in clusters took {(time.time()-start_time)*1000:.2f}ms"); start_time = time.time()

    # --- Dense-binary rescoring: float query embedding against the document
    # bits unpacked into {-1, +1} ---
    for hit in all_hits:
        unpacked = np.unpackbits(hit['doc_emb'], axis=-1).astype("int")
        unpacked = 2 * unpacked - 1
        hit['cont_score'] = (query_emb @ unpacked.T).item()

    all_hits.sort(key=lambda x: x['cont_score'], reverse=True)
    all_hits = all_hits[0:args.topk]
    print(f"Dense-Binary rescoring took {(time.time()-start_time)*1000:.2f}ms"); start_time = time.time()

    # --- Fetch the winning documents from the compressed per-cluster text files ---
    results = []
    dctx = zstandard.ZstdDecompressor()
    for hit in all_hits:
        text_path = os.path.join(args.input, f"text/{hit['cid'][-args.ivf['folder_len']:]}/{hit['cid']}.jsonl.zst")
        with zstandard.open(text_path, "rt", dctx=dctx) as fIn:
            # Scan line-by-line to the hit's row index within the cluster file.
            for line_idx, line in enumerate(fIn):
                if line_idx == hit['doc_idx']:
                    data = json.loads(line)
                    data['_score'] = hit['cont_score']
                    data['_path'] = text_path
                    results.append(data)
                    break

    print(f"Fetch docs took {(time.time()-start_time)*1000:.2f}ms")

    for hit in results[0:3]:
        print(hit)
        print("-------------")
108
+
109
+
110
def main():
    """Parse CLI options, restore the centroid index, then serve queries in a loop."""
    parser = ArgumentParser()
    parser.add_argument("--model", default="embed-english-v3.0")
    parser.add_argument("--input", required=True, help="IVF Folder")
    parser.add_argument("--nprobe", type=int, default=100)
    parser.add_argument("--topk", type=int, default=10)
    args = parser.parse_args()

    # IVF layout config (e.g. file_len / folder_len) written at index-build time;
    # attached to args so search() gets everything in one object.
    with open(f"{args.input}/config.json") as f:
        args.ivf = json.load(f)

    # Rebuild the in-memory binary index over the cluster centroids.
    with open(os.path.join(args.input, "centroid_vecs.npy"), "rb") as f:
        centroid_matrix = np.load(f)
    print("Centroids shape:", centroid_matrix.shape)
    centroids = faiss.IndexBinaryFlat(centroid_matrix.shape[1] * 8)
    centroids.add(centroid_matrix)

    # Interactive REPL: one full search per entered query; exits via Ctrl-C/EOF.
    while True:
        query = input("Query: ")
        search(args, centroids, query)
        print("\n===========================\n")
135
+
136
+
137
if __name__ == "__main__":
    # Worker pool used (as a module global) by search() for per-cluster fan-out.
    # Created under the main guard so 'spawn'-based platforms don't re-create it
    # on worker import. Fixed: the original never released the pool — close and
    # join it on exit (including Ctrl-C out of the query loop).
    pool = Pool(processes=8)
    try:
        main()
    finally:
        pool.close()
        pool.join()