gewei20 committed on
Commit 671d8e3 · verified · 1 Parent(s): 67add1d

Create md_knowledge_base_v1.py

Files changed (1)
  1. md_knowledge_base_v1.py +272 -0
md_knowledge_base_v1.py ADDED
@@ -0,0 +1,272 @@
# md_knowledge_base_v1.py
import os
import json
import hashlib
import random
import time
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional

import numpy as np
import requests


class MarkdownKnowledgeBase:
    def __init__(self, api_token: str, base_url: str = "https://api.siliconflow.cn/v1"):
        """
        Initialize the knowledge-base builder.

        Args:
            api_token: SiliconFlow API token
            base_url: API base URL
        """
        self.api_token = api_token
        self.base_url = base_url
        self.headers = {
            "Authorization": f"Bearer {api_token}",
            "Content-Type": "application/json"
        }
        self.knowledge_base = []

    def scan_markdown_files(self, folder_path: str) -> List[str]:
        """Recursively collect accessible .md files under folder_path (unchanged from the previous version)."""
        md_files = []
        folder = Path(folder_path)
        if not folder.exists():
            raise FileNotFoundError(f"Folder does not exist: {folder_path}")
        try:
            for md_file in folder.rglob("*.md"):
                if md_file.is_file():
                    file_path = str(md_file.resolve())
                    try:
                        if os.path.exists(file_path) and os.path.isfile(file_path):
                            md_files.append(file_path)
                        else:
                            print(f"Skipping inaccessible file: {file_path}")
                    except Exception as e:
                        print(f"Skipping problematic file: {md_file} - {e}")
                        continue
        except Exception as e:
            print(f"Error while scanning folder: {e}")
        print(f"Found {len(md_files)} accessible Markdown files")
        return md_files

    def read_markdown_content(self, file_path: str) -> Optional[Dict]:
        """Read a Markdown file, trying several encodings (unchanged from the previous version)."""
        try:
            file_path = os.path.normpath(file_path)
            if not os.path.exists(file_path):
                print(f"File does not exist: {file_path}")
                return None
            encodings = ['utf-8', 'utf-8-sig', 'gbk', 'cp1252', 'latin1']
            content = None
            used_encoding = None
            for encoding in encodings:
                try:
                    with open(file_path, 'r', encoding=encoding) as file:
                        content = file.read()
                    used_encoding = encoding
                    break
                except UnicodeDecodeError:
                    continue
                except Exception as e:
                    print(f"Reading with encoding {encoding} failed: {e}")
                    continue
            if content is None:
                print(f"Unable to read {file_path}: all encodings failed")
                return None
            # MD5 here is a content fingerprint for change detection, not security.
            file_hash = hashlib.md5(content.encode('utf-8')).hexdigest()
            return {
                'file_path': file_path,
                'file_name': os.path.basename(file_path),
                'content': content,
                'hash': file_hash,
                'size': len(content),
                'encoding': used_encoding,
                'modified_time': datetime.fromtimestamp(os.path.getmtime(file_path)).isoformat()
            }
        except Exception as e:
            print(f"Failed to read file {file_path}: {e}")
            return None

    def chunk_text(self, text: str, chunk_size: int = 4096, overlap: int = 400) -> List[str]:
        """Split text into overlapping chunks (defaults updated to match bge-m3's long context)."""
        if len(text) <= chunk_size:
            return [text]
        chunks = []
        start = 0
        while start < len(text):
            end = start + chunk_size
            if end < len(text):
                # Prefer to break at a paragraph boundary, sentence end, newline, or space.
                for separator in ['\n\n', '。', '\n', ' ']:
                    split_pos = text.rfind(separator, start, end)
                    if split_pos > start:
                        end = split_pos + len(separator)
                        break
            chunk = text[start:end].strip()
            if chunk:
                chunks.append(chunk)
            # Advance while keeping `overlap` characters of context; the max()
            # guarantees forward progress even when end - overlap <= start.
            start = max(start + 1, end - overlap)
        return chunks

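    # Illustrative behavior of chunk_text (hypothetical numbers, not part of
    # the original file): a 10,000-character document with chunk_size=4096 and
    # overlap=400 splits into roughly ceil((10000 - 400) / (4096 - 400)) = 3
    # chunks, and consecutive chunks share about 400 characters so that a
    # sentence straddling a cut point appears intact in at least one chunk.
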
    def get_embeddings(self, texts: List[str], model: str = "BAAI/bge-m3") -> List[List[float]]:
        """
        Fetch embedding vectors for a list of texts.

        Args:
            texts: list of texts to embed
            model: embedding model name (updated to bge-m3)

        Returns:
            One vector per input text; failed items come back as empty lists.
        """
        url = f"{self.base_url}/embeddings"
        embeddings = []
        # Larger batches cut the number of round trips and total wait time.
        batch_size = 32
        total_batches = (len(texts) + batch_size - 1) // batch_size

        print(f"Embedding {len(texts)} text chunks in {total_batches} batches")

        for batch_idx in range(0, len(texts), batch_size):
            batch = texts[batch_idx:batch_idx + batch_size]
            current_batch = batch_idx // batch_size + 1
            print(f"Batch {current_batch}/{total_batches} ({len(batch)} texts)")
            payload = {"model": model, "input": batch, "encoding_format": "float"}

            max_retries = 3
            for attempt in range(max_retries):
                try:
                    # Generous timeout: large batches can take a while server-side.
                    response = requests.post(url, json=payload, headers=self.headers, timeout=60)
                    response.raise_for_status()
                    result = response.json()
                    if 'data' in result:
                        batch_embeddings = [item['embedding'] for item in result['data']]
                        embeddings.extend(batch_embeddings)
                        print(f"  ✓ Got {len(batch_embeddings)} vectors")
                    else:
                        print(f"  ✗ Unexpected API response format: {result}")
                        embeddings.extend([[] for _ in batch])
                    break
                except requests.exceptions.RequestException as e:
                    print(f"  ✗ Request failed (attempt {attempt + 1}/{max_retries}): {e}")
                    if attempt == max_retries - 1:
                        # Out of retries: record empty vectors so indices stay aligned.
                        embeddings.extend([[] for _ in batch])
                    else:
                        time.sleep(2 ** attempt)  # exponential backoff

            # Short pause between batches to stay under rate limits.
            time.sleep(0.1)

        succeeded = sum(1 for e in embeddings if e)
        print(f"Embedding finished: {succeeded} succeeded, {len(embeddings) - succeeded} failed")
        return embeddings

    def rerank_documents(self, query: str, documents: List[str],
                         model: str = "BAAI/bge-reranker-v2-m3",
                         top_n: int = 10) -> Dict:
        """Rerank candidate documents against the query (updated to bge-reranker-v2-m3)."""
        url = f"{self.base_url}/rerank"
        payload = {
            "model": model, "query": query, "documents": documents,
            "top_n": min(top_n, len(documents)), "return_documents": True
        }
        try:
            response = requests.post(url, json=payload, headers=self.headers, timeout=60)
            response.raise_for_status()
            return response.json()
        except Exception as e:
            print(f"Rerank failed: {e}")
            return {"results": []}

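    # Sketch of a successful /rerank response as consumed by search() below
    # (abbreviated and assumed; only the fields this file actually reads are shown):
    #   {"results": [{"index": 2, "relevance_score": 0.93, "document": {...}}, ...]}
    # where "index" points into the submitted documents list.
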
    def build_knowledge_base(self, folder_path: str, chunk_size: int = 4096, overlap: int = 400,
                             max_files: Optional[int] = None, sample_mode: str = "random"):
        """Scan, chunk, and embed all Markdown files (logic unchanged; defaults updated)."""
        print("Building knowledge base...")
        md_files = self.scan_markdown_files(folder_path)
        if not md_files:
            print("No processable Markdown files found")
            return
        if max_files and len(md_files) > max_files:
            print(f"Too many files ({len(md_files)}); selecting {max_files} via the '{sample_mode}' strategy")
            if sample_mode == "random":
                md_files = random.sample(md_files, max_files)
            elif sample_mode == "largest":
                file_sizes = sorted([(fp, os.path.getsize(fp)) for fp in md_files], key=lambda x: x[1], reverse=True)
                md_files = [fp for fp, _ in file_sizes[:max_files]]
            elif sample_mode == "recent":
                file_times = sorted([(fp, os.path.getmtime(fp)) for fp in md_files], key=lambda x: x[1], reverse=True)
                md_files = [fp for fp, _ in file_times[:max_files]]
        print(f"Will process {len(md_files)} files")
        all_chunks, chunk_metadata = [], []
        processed_files, skipped_files = 0, 0
        for i, file_path in enumerate(md_files, 1):
            print(f"Processing file {i}/{len(md_files)}: {os.path.basename(file_path)}")
            file_info = self.read_markdown_content(file_path)
            # Skip unreadable or near-empty files.
            if not file_info or len(file_info['content'].strip()) < 50:
                skipped_files += 1
                continue
            chunks = self.chunk_text(file_info['content'], chunk_size, overlap)
            processed_files += 1
            for j, chunk in enumerate(chunks):
                if len(chunk.strip()) > 20:  # drop trivially short chunks
                    all_chunks.append(chunk)
                    chunk_metadata.append({
                        'file_path': file_info['file_path'],
                        'file_name': file_info['file_name'],
                        'chunk_index': j,
                        'chunk_count': len(chunks),
                        'file_hash': file_info['hash']
                    })
        print(f"Processed {processed_files} files, skipped {skipped_files}")
        print(f"Generated {len(all_chunks)} text chunks in total")
        if not all_chunks:
            print("No valid text chunks; knowledge base build failed")
            return
        print("Generating embeddings...")
        embeddings = self.get_embeddings(all_chunks)
        self.knowledge_base = []
        valid_embeddings = 0
        for chunk, embedding, metadata in zip(all_chunks, embeddings, chunk_metadata):
            if embedding:  # keep only chunks whose embedding succeeded
                self.knowledge_base.append({
                    'id': len(self.knowledge_base),
                    'content': chunk,
                    'embedding': embedding,
                    'metadata': metadata
                })
                valid_embeddings += 1
        print(f"Knowledge base built! Valid vectors: {valid_embeddings}, total entries: {len(self.knowledge_base)}")

    def search(self, query: str, top_k: int = 5, use_rerank: bool = True) -> List[Dict]:
        """Cosine-similarity retrieval, optionally refined by the reranker (unchanged)."""
        if not self.knowledge_base:
            return []
        query_embedding = self.get_embeddings([query])[0]
        if not query_embedding:
            return []
        query_embedding_norm = np.linalg.norm(query_embedding)
        if query_embedding_norm == 0:
            return []
        similarities = []
        for item in self.knowledge_base:
            if not item['embedding']:
                similarities.append(0)
                continue
            item_embedding_norm = np.linalg.norm(item['embedding'])
            if item_embedding_norm == 0:
                similarities.append(0)
            else:
                # Cosine similarity between the query vector and the chunk vector.
                similarity = np.dot(query_embedding, item['embedding']) / (query_embedding_norm * item_embedding_norm)
                similarities.append(similarity)
        # Over-fetch 3x candidates so the reranker has room to reorder.
        top_results_indices = sorted(range(len(similarities)), key=lambda i: similarities[i], reverse=True)[:min(top_k * 3, len(similarities))]
        if use_rerank and len(top_results_indices) > 1:
            documents_to_rerank = [self.knowledge_base[i]['content'] for i in top_results_indices]
            rerank_result = self.rerank_documents(query, documents_to_rerank, top_n=top_k)
            if rerank_result.get('results'):
                final_results = []
                for res in rerank_result['results']:
                    # Map the reranker's position back to the knowledge-base entry.
                    original_index = top_results_indices[res['index']]
                    item = self.knowledge_base[original_index].copy()
                    item['relevance_score'] = res['relevance_score']
                    final_results.append(item)
                return final_results[:top_k]
        return [self.knowledge_base[i] for i in top_results_indices[:top_k]]

    def save_knowledge_base(self, output_path: str):
        """Persist chunks, embeddings, and metadata as JSON."""
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(self.knowledge_base, f, ensure_ascii=False, indent=2)
        print(f"Knowledge base saved to: {output_path}")

    def load_knowledge_base(self, input_path: str):
        """Load a previously saved knowledge base from JSON."""
        with open(input_path, 'r', encoding='utf-8') as f:
            self.knowledge_base = json.load(f)
        print(f"Knowledge base loaded from {input_path} with {len(self.knowledge_base)} entries")