Update infer/train-index.py
infer/train-index.py · +36 −36 · @@ -1,36 +1,36 @@

All 36 lines of the file are rewritten, but the code statements are identical on both sides of the hunk; the change rewrites the file's comments and docstrings (now in English). The updated file:
```python
"""
Format: cid is used directly as the vector's built-in index position; aid does
not fit that scheme, so it is looked up through a dict (there are only ~50,000
entries anyway).
"""
import faiss, numpy as np, os

# ########### If starting from raw per-file features, build and save big_npy first
inp_root = r"E:\codes\py39\dataset\mi\2-co256"
npys = []
for name in sorted(list(os.listdir(inp_root))):
    phone = np.load("%s/%s" % (inp_root, name))
    npys.append(phone)
big_npy = np.concatenate(npys, 0)
print(big_npy.shape)  # (6196072, 192), fp32, 4.43G
np.save("infer/big_src_feature_mi.npy", big_npy)

# ################## train + add
# big_npy = np.load("/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/inference_f0/big_src_feature_mi.npy")
print(big_npy.shape)
index = faiss.index_factory(256, "IVF512,Flat")  # mi
print("training")
index_ivf = faiss.extract_index_ivf(index)  # underlying IVF index, so nprobe can be set
index_ivf.nprobe = 9
index.train(big_npy)
faiss.write_index(index, "infer/trained_IVF512_Flat_mi_baseline_src_feat.index")
print("adding")
index.add(big_npy)
faiss.write_index(index, "infer/added_IVF512_Flat_mi_baseline_src_feat.index")
"""
Sizes (all fp32):
big_src_feature  2.95G  (3098036, 256)
big_emb          4.43G  (6196072, 192)
big_emb is twice as large because the features are repeated and then pitch is added.
"""
```
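The docstring's aid scheme then reduces to a plain dict keyed by those cids. A minimal illustration with hypothetical names (`aids`, `cid_to_aid`) that do not appear in the repo:

```python
# Illustration only -- none of these names exist in the repo.
# cid needs no stored mapping: it is the row position inside the index.
# aid cannot double as a position, so it goes through a small dict
# (~50,000 entries according to the docstring).
aids = ["aid_%d" % i for i in range(50000)]  # placeholder aid values
cid_to_aid = dict(enumerate(aids))

cid = 12345
print(cid_to_aid[cid])  # "aid_12345": metadata for the vector at row 12345
```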