import json
import os
import time
from argparse import ArgumentParser
from multiprocessing.pool import Pool

import cohere
import faiss
import numpy as np
import zstandard

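# Interactive retrieval over a disk-based binary IVF index:
#   1) embed the query with Cohere and binarize it (positive dims -> 1),
#   2) find the nprobe closest centroids with a faiss binary (Hamming) index,
#   3) search the matching cluster embedding files in parallel,
#   4) rescore candidates with the dense query embedding and fetch their texts.
# Example invocation (script name and index path are illustrative):
#   COHERE_API_KEY=... python search_ivf.py --input ./ivf-index --nprobe 100 --topk 10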

cohere_key = os.environ["COHERE_API_KEY"]
co = cohere.Client(cohere_key)

# Cap the number of OpenMP threads faiss uses for its searches
faiss.omp_set_num_threads(4)

def get_bin_embedding(query, model):
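    """Embed the query with Cohere and binarize it: dimensions > 0 map to 1, the rest
    to 0, packed into uint8 so it can be searched with a faiss binary (Hamming) index."""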
    query_emb = np.asarray(co.embed(texts=[query], model=model, input_type="search_query").embeddings)
    query_emb_bin = np.zeros_like(query_emb, dtype=np.int8)
    query_emb_bin[query_emb > 0] = 1
    query_emb_bin = np.packbits(query_emb_bin, axis=-1)
    return query_emb, query_emb_bin

def search_cluster(args):
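    """Load the packed binary embeddings of one cluster, index them with a temporary
    faiss binary index, and return the topk Hamming-distance hits for the query."""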
    with open(os.path.join(args['input_path'], f"emb/{args['cid_folder']}/{args['cid']}.npy"), "rb") as fIn:
        cluster_emb = np.load(fIn)

    cluster_index = faiss.IndexBinaryFlat(cluster_emb.shape[1]*8)
    cluster_index.add(cluster_emb)

    cluster_scores, cluster_doc_ids = cluster_index.search(args['query_emb_bin'], args['topk'])

    # faiss pads with doc_idx == -1 when the cluster holds fewer than topk vectors
    return [{'cid': args['cid'], 'doc_idx': doc_idx, 'doc_score': doc_score, 'doc_emb': cluster_emb[doc_idx]}
            for doc_score, doc_idx in zip(cluster_scores[0], cluster_doc_ids[0]) if doc_idx != -1]

def search(args, centroids, query):
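    """Run the full query pipeline: encode the query, probe the centroids, search the
    selected clusters, rescore with the dense query embedding, and print the top hits."""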
    num_rescore = args.topk * 5

    start_time = time.time()
    query_emb, query_emb_bin = get_bin_embedding(query, args.model)
    print(f"Query encoding took {(time.time()-start_time)*1000:.2f}ms")

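    # Stage 1: Hamming search over the binary centroids to select the nprobe closest clusters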
    start_time = time.time()
    centroid_scores, centroid_ids = centroids.search(query_emb_bin, args.nprobe)
    centroid_ids = centroid_ids[0]
    print(f"Centroid search took {(time.time()-start_time)*1000:.2f}ms")

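    # Stage 2: search the selected cluster files in parallel worker processes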
    start_time = time.time()
    all_hits = []

    pool_args = []
    for cid in centroid_ids:
        cid_str = str(cid.item()).zfill(args.ivf['file_len'])
        cid_folder = cid_str[-args.ivf['folder_len']:]
        pool_args.append({'cid': cid_str, 'cid_folder': cid_folder, 'input_path': args.input, 'topk': args.topk, 'query_emb_bin': query_emb_bin})

    for result in pool.imap_unordered(search_cluster, pool_args, chunksize=10):
        all_hits.extend(result)

    # Hamming distance: smaller is better, so keep the num_rescore closest candidates
    all_hits.sort(key=lambda x: x['doc_score'])
    all_hits = all_hits[0:num_rescore]

    print(f"Searching in clusters took {(time.time()-start_time)*1000:.2f}ms"); start_time = time.time()

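    # Stage 3: rescore the candidates with the dense query embedding against the
    # unpacked document bits mapped to {-1, +1}, then keep the topk best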
    for hit in all_hits:
        doc_emb = hit['doc_emb']
        doc_emb_bin_unpacked = np.unpackbits(doc_emb, axis=-1).astype("int")
        doc_emb_bin_unpacked = 2 * doc_emb_bin_unpacked - 1
        hit['cont_score'] = (query_emb @ doc_emb_bin_unpacked.T).item()

    all_hits.sort(key=lambda x: x['cont_score'], reverse=True)
    all_hits = all_hits[0:args.topk]
    print(f"Dense-Binary rescoring took {(time.time()-start_time)*1000:.2f}ms"); start_time = time.time()

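    # Stage 4: fetch the matching documents from the zstd-compressed JSONL text files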
    results = []

    dctx = zstandard.ZstdDecompressor()
    for hit in all_hits:
        text_path = os.path.join(args.input, f"text/{hit['cid'][-args.ivf['folder_len']:]}/{hit['cid']}.jsonl.zst")
        with zstandard.open(text_path, "rt", dctx=dctx) as fIn:
            for line_idx, line in enumerate(fIn):
                if line_idx == hit['doc_idx']:
                    data = json.loads(line)
                    data['_score'] = hit['cont_score']
                    data['_path'] = text_path
                    results.append(data)
                    break

    print(f"Fetch docs took {(time.time()-start_time)*1000:.2f}ms")

    for hit in results[0:3]:
        print(hit)
        print("-------------")

def main():
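    """Load the IVF config and the binary centroid index, then answer queries interactively."""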
    parser = ArgumentParser()
    parser.add_argument("--model", default="embed-english-v3.0")
    parser.add_argument("--input", required=True, help="IVF Folder")
    parser.add_argument("--nprobe", type=int, default=100)
    parser.add_argument("--topk", type=int, default=10)
    args = parser.parse_args()

    # Index layout written by the index builder (e.g. file_len / folder_len used to build cluster paths)
    with open(f"{args.input}/config.json") as fIn:
        args.ivf = json.load(fIn)

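    # Load the packed binary centroid vectors into a flat binary (Hamming) index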
    with open(os.path.join(args.input, "centroid_vecs.npy"), "rb") as fIn:
        centroid_vec = np.load(fIn)

    print("Centroids shape:", centroid_vec.shape)
    centroids = faiss.IndexBinaryFlat(centroid_vec.shape[1]*8)
    centroids.add(centroid_vec)

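    # Interactive loop: read a query from stdin and run the full search pipeline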
    while True:
        query = input("Query: ")
        search(args, centroids, query)
        print("\n===========================\n")

if __name__ == "__main__":
    # Process pool used by search() to scan cluster files in parallel; created under the
    # __main__ guard so that worker processes do not spawn pools of their own.
    pool = Pool(processes=8)
    main()