ykzhang721 committed on
Commit ee439ff · verified · 1 Parent(s): 0a4c168

Upload dataset.py with huggingface_hub
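For reference, a minimal sketch of how a file like this is typically pushed with huggingface_hub; the repo id and token setup are placeholders, not details taken from this commit:

from huggingface_hub import HfApi

api = HfApi()  # assumes a configured token (HF_TOKEN or `huggingface-cli login`)
api.upload_file(
    path_or_fileobj="dataset.py",            # local file to upload
    path_in_repo="dataset.py",               # destination path inside the repo
    repo_id="your-username/your-dataset",    # placeholder repository id
    repo_type="dataset",
    commit_message="Upload dataset.py with huggingface_hub",
)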
Files changed (1)
dataset.py +317 -0
dataset.py ADDED
@@ -0,0 +1,317 @@
+ import gzip
+ import io
+ import json
+ import os
+
+ import numpy as np
+ import pandas as pd
+ import torch
+ import zstandard as zstd
+ from torch.utils.data import Dataset
+ from tqdm import tqdm
+
+ # Register tqdm's pandas hooks so DataFrame.progress_apply exists when
+ # pandarallel is not installed.
+ tqdm.pandas()
+
+
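+ # All three datasets below return items of the form
+ # (input_ids, labels, attention_mask, slice_indices); every field has length
+ # max_length, and slice_indices holds segment-boundary positions padded with -1.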
+ class SemiNATForSingleRoundMaskInput(Dataset):
+     '''
+     Masks out all input (user) tokens, so the loss is computed only on the
+     output (assistant) tokens.
+     '''
+
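+     # Assumed input schema (not defined in this file): `datas` is a pandas
+     # DataFrame with a 'messages' column; each message is a dict with 'role',
+     # 'content', and, for assistant turns, 'split_pos' (token positions of
+     # segment boundaries within the sample).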
+     def __init__(self, tokenizer, datas, max_length, proc):
+         self.tokenizer = tokenizer
+         self.max_length = max_length
+         self.proc = proc
+         # Speed up preprocessing with apply + parallelism
+         processed = self._vectorized_preprocess(datas)
+         self.input_ids = processed["input_ids"]
+         self.labels = processed["labels"]
+         self.attention_mask = processed["attention_mask"]
+         self.slice_indices = processed["slice_indices"]
+
+     def _vectorized_preprocess(self, datas):
+         # Pre-allocate the output arrays for the whole batch
+         input_ids = np.zeros((len(datas), self.max_length), dtype=np.int64)
+         attention_mask = np.zeros((len(datas), self.max_length),
+                                   dtype=np.int64)
+         labels = np.full((len(datas), self.max_length), -100, dtype=np.int64)
+         slice_indices = np.full((len(datas), self.max_length),
+                                 -1,
+                                 dtype=np.int64)
+
+         # Convert each row's messages into fixed-length arrays
+         def process_row(row):
+             total_inputs = []
+             total_labels = []
+             sample_slice = []
+
+             for msg in row['messages']:
+                 # Tokenize in batch (assumes msg['content'] is a list of texts)
+                 inputs = self.tokenizer(msg['content'],
+                                         padding=False,
+                                         truncation=False,
+                                         add_special_tokens=False).input_ids
+                 total_inputs.extend(inputs)
+
+                 if msg['role'] == 'user':
+                     total_labels.extend(len(inputs) * [-100])
+                 elif msg['role'] == 'assistant':
+                     total_labels.extend(inputs)
+                     sample_slice.extend(msg['split_pos'])
+
+             # Truncate or pad to max_length
+             seq_len = min(len(total_inputs), self.max_length)
+             # Inputs and labels
+             input_ids = total_inputs[:self.max_length] + [
+                 self.tokenizer.pad_token_id
+             ] * (self.max_length - seq_len)
+             labels = total_labels[:self.max_length] + [-100] * (
+                 self.max_length - seq_len)
+             # attention_mask
+             attention_mask = [1] * seq_len + [0] * (self.max_length - seq_len)
+             # slice_indices
+             slice_arr = np.array(sample_slice[:self.max_length] + [-1] *
+                                  (self.max_length - len(sample_slice)))
+             # Drop positions beyond the sequence; the largest valid index is
+             # max_length - 1 (e.g. 1023 when max_length is 1024)
+             slice_arr[slice_arr > self.max_length - 1] = -1
+
+             return input_ids, labels, attention_mask, slice_arr
+
+         # Process every row in parallel (requires pandarallel to be installed)
+         try:
+             from pandarallel import pandarallel
+             pandarallel.initialize(nb_workers=self.proc, progress_bar=True)
+             processed = datas.parallel_apply(process_row, axis=1)
+         except ImportError:
+             # Fall back to a single process with a tqdm progress bar
+             processed = datas.progress_apply(process_row, axis=1)
+
+         # Collect the per-row results into the pre-allocated arrays
+         for idx, (i_ids, lbl, attn, slc) in enumerate(processed):
+             input_ids[idx] = i_ids
+             labels[idx] = lbl
+             attention_mask[idx] = attn
+             slice_indices[idx] = slc
+
+         return {
+             "input_ids": input_ids,
+             "labels": labels,
+             "attention_mask": attention_mask,
+             "slice_indices": slice_indices
+         }
+
+     def __len__(self):
+         return len(self.input_ids)
+
+     def __getitem__(self, index):
+         # Wrap the pre-allocated arrays as tensors without extra copies
+         return (torch.as_tensor(self.input_ids[index]),
+                 torch.as_tensor(self.labels[index]),
+                 torch.as_tensor(self.attention_mask[index]),
+                 torch.as_tensor(self.slice_indices[index]))
+
+
+ class SemiNATForSingleRound(Dataset):
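+     # Same preprocessing as SemiNATForSingleRoundMaskInput, but the labels
+     # copy the full token sequence, so the loss covers every token rather
+     # than only the assistant turns.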
+
+     def __init__(self, tokenizer, datas, max_length, proc):
+         self.tokenizer = tokenizer
+         self.max_length = max_length
+         self.proc = proc
+         # Speed up preprocessing with apply + parallelism
+         processed = self._vectorized_preprocess(datas)
+         self.input_ids = processed["input_ids"]
+         self.labels = processed["labels"]
+         self.attention_mask = processed["attention_mask"]
+         self.slice_indices = processed["slice_indices"]
+
+     def _vectorized_preprocess(self, datas):
+         # Pre-allocate the output arrays for the whole batch
+         input_ids = np.zeros((len(datas), self.max_length), dtype=np.int64)
+         attention_mask = np.zeros((len(datas), self.max_length),
+                                   dtype=np.int64)
+         labels = np.full((len(datas), self.max_length), -100, dtype=np.int64)
+         slice_indices = np.full((len(datas), self.max_length),
+                                 -1,
+                                 dtype=np.int64)
+
+         # Convert each row's messages into fixed-length arrays
+         def process_row(row):
+             total_inputs = []
+             sample_slice = []
+
+             for msg in row['messages']:
+                 # Tokenize in batch (assumes msg['content'] is a list of texts)
+                 inputs = self.tokenizer(msg['content'],
+                                         padding=False,
+                                         truncation=False,
+                                         add_special_tokens=False).input_ids
+                 total_inputs.extend(inputs)
+                 # Extend the slice positions directly from the message
+                 sample_slice.extend(msg['split_pos'])
+
+             # Truncate or pad to max_length
+             seq_len = min(len(total_inputs), self.max_length)
+             # Inputs and labels (labels are the full, unmasked sequence)
+             input_ids = total_inputs[:self.max_length] + [
+                 self.tokenizer.pad_token_id
+             ] * (self.max_length - seq_len)
+             labels = total_inputs[:self.max_length] + [-100] * (
+                 self.max_length - seq_len)
+             # attention_mask
+             attention_mask = [1] * seq_len + [0] * (self.max_length - seq_len)
+             # slice_indices
+             slice_arr = np.array(sample_slice[:self.max_length] + [-1] *
+                                  (self.max_length - len(sample_slice)))
+             # Drop positions beyond the sequence
+             slice_arr[slice_arr > self.max_length] = -1
+
+             return input_ids, labels, attention_mask, slice_arr
+
+         # Process every row in parallel (requires pandarallel to be installed)
+         try:
+             from pandarallel import pandarallel
+             pandarallel.initialize(nb_workers=self.proc, progress_bar=True)
+             processed = datas.parallel_apply(process_row, axis=1)
+         except ImportError:
+             # Fall back to a single process with a tqdm progress bar
+             processed = datas.progress_apply(process_row, axis=1)
+
+         # Collect the per-row results into the pre-allocated arrays
+         for idx, (i_ids, lbl, attn, slc) in enumerate(processed):
+             input_ids[idx] = i_ids
+             labels[idx] = lbl
+             attention_mask[idx] = attn
+             slice_indices[idx] = slc
+
+         return {
+             "input_ids": input_ids,
+             "labels": labels,
+             "attention_mask": attention_mask,
+             "slice_indices": slice_indices
+         }
+
+     def __len__(self):
+         return len(self.input_ids)
+
+     def __getitem__(self, index):
+         # Wrap the pre-allocated arrays as tensors without extra copies
+         return (torch.as_tensor(self.input_ids[index]),
+                 torch.as_tensor(self.labels[index]),
+                 torch.as_tensor(self.attention_mask[index]),
+                 torch.as_tensor(self.slice_indices[index]))
+
+
+ class SemiNATDatasetForPretrain(Dataset):
+     # data_path points to a jsonl.zstd or json.gz file (one JSON object per line)
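+     # Each example provides a plain 'text' field. Tokens are split into fixed
+     # 8-token segments, and the labels are the inputs shifted right by one
+     # position with an EOS id prepended (see process_row below).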
+     def __init__(self,
+                  tokenizer,
+                  data_path,
+                  max_length,
+                  proc,
+                  cache_path=None):
+         if data_path.endswith('.zstd'):
+             data = pd.DataFrame([
+                 json.loads(line) for line in self._decompress_zst_to_string(
+                     data_path).splitlines()
+             ])
+         else:  # json.gz file, one JSON object per line
+             with gzip.open(data_path, 'rt', encoding='utf-8') as f:
+                 data = pd.DataFrame([json.loads(line) for line in f])
+
+         self.tokenizer = tokenizer
+         self.max_length = max_length
+         self.proc = proc
+
+         if cache_path and os.path.exists(cache_path):
+             # Note: the cache is only read here; writing cache_path appears to
+             # happen elsewhere.
+             print(f"[INFO] Loading cached data from {cache_path}")
+             cached = torch.load(cache_path)
+             self.input_ids = cached["input_ids"]
+             self.labels = cached["labels"]
+             self.attention_mask = cached["attention_mask"]
+             self.slice_indices = cached["slice_indices"]
+         else:
+             processed = self._vectorized_preprocess(data)
+             self.input_ids = processed["input_ids"]
+             self.labels = processed["labels"]
+             self.attention_mask = processed["attention_mask"]
+             self.slice_indices = processed["slice_indices"]
+         if not isinstance(self.input_ids, torch.Tensor):
+             self.input_ids = torch.tensor(self.input_ids, dtype=torch.long)
+             self.labels = torch.tensor(self.labels, dtype=torch.long)
+             self.attention_mask = torch.tensor(self.attention_mask,
+                                                dtype=torch.long)
+             self.slice_indices = torch.tensor(self.slice_indices,
+                                               dtype=torch.long)
+
+     def _decompress_zst_to_string(self, input_file):
+         with open(input_file, 'rb') as f:
+             dctx = zstd.ZstdDecompressor()
+             with dctx.stream_reader(f) as reader:
+                 text_stream = io.TextIOWrapper(reader, encoding='utf-8')
+                 return text_stream.read()  # read the whole stream as a string
+
+     def _vectorized_preprocess(self, data):
+         input_ids = np.zeros((len(data), self.max_length), dtype=np.int64)
+         attention_mask = np.zeros((len(data), self.max_length), dtype=np.int64)
+         labels = np.full((len(data), self.max_length), -100, dtype=np.int64)
+         slice_indices = np.full((len(data), self.max_length),
+                                 -1,
+                                 dtype=np.int64)
+
+         def process_row(row):
+             inputs = self.tokenizer(row['text'],
+                                     padding=False,
+                                     truncation=False,
+                                     add_special_tokens=False).input_ids
+             # Slice into 8-token segments, i.e. sample_slice is [1, 9, 17, 25, ...]
+             sample_slice = (np.arange(0, len(inputs), 8) + 1).tolist()
+             # Add the end position if it is not already a boundary
+             if len(inputs) % 8 != 1:
+                 sample_slice.append(len(inputs))
+
+             # Truncate or pad to max_length
+             seq_len = min(len(inputs), self.max_length)
+             # Inputs and labels
+             input_ids = inputs[:self.max_length] + [
+                 self.tokenizer.pad_token_id
+             ] * (self.max_length - seq_len)
+             labels = [
+                 50279  # <EOS> token id (hard-coded)
+             ] + inputs[:self.max_length -
+                        1] + [-100] * (self.max_length - 1 - seq_len)
+             # attention_mask
+             attention_mask = [1] * seq_len + [0] * (self.max_length - seq_len)
+             # slice_indices
+             slice_arr = np.array(sample_slice[:self.max_length] + [-1] *
+                                  (self.max_length - len(sample_slice)))
+             # Drop positions beyond the sequence
+             slice_arr[slice_arr > self.max_length] = -1
+
+             return input_ids, labels, attention_mask, slice_arr
+
+         # Process every row in parallel (requires pandarallel to be installed)
+         try:
+             from pandarallel import pandarallel
+             pandarallel.initialize(nb_workers=self.proc, progress_bar=True)
+             processed = data.parallel_apply(process_row, axis=1)
+         except ImportError:
+             # Fall back to a single process with a tqdm progress bar
+             processed = data.progress_apply(process_row, axis=1)
+
+         # Collect the per-row results into the pre-allocated arrays
+         for idx, (i_ids, lbl, attn, slc) in enumerate(processed):
+             input_ids[idx] = i_ids
+             labels[idx] = lbl
+             attention_mask[idx] = attn
+             slice_indices[idx] = slc
+
+         return {
+             "input_ids": input_ids,
+             "labels": labels,
+             "attention_mask": attention_mask,
+             "slice_indices": slice_indices
+         }
+
+     def __len__(self):
+         return len(self.input_ids)
+
+     def __getitem__(self, index):
+         return (self.input_ids[index], self.labels[index],
+                 self.attention_mask[index], self.slice_indices[index])
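A minimal usage sketch (not part of this commit), assuming a Hugging Face tokenizer and a DataFrame with the message schema noted above; the tokenizer path, example data, and hyperparameters are placeholders:

import pandas as pd
from torch.utils.data import DataLoader
from transformers import AutoTokenizer

from dataset import SemiNATForSingleRoundMaskInput

# Placeholder tokenizer; any tokenizer with a pad_token_id should work here.
tokenizer = AutoTokenizer.from_pretrained("path/to/tokenizer")

# Toy single-round conversation; 'split_pos' marks segment-boundary token
# positions inside the assistant reply.
datas = pd.DataFrame({
    "messages": [[
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi there!", "split_pos": [1, 3]},
    ]]
})

ds = SemiNATForSingleRoundMaskInput(tokenizer, datas, max_length=1024, proc=4)
loader = DataLoader(ds, batch_size=2, shuffle=True)
for input_ids, labels, attention_mask, slice_indices in loader:
    pass  # feed the batch to the model's forward pass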