Text Generation
Transformers
Safetensors
llama
text-generation-inference
Inference Endpoints
mfromm committed on
Commit
f747383
·
verified ·
1 Parent(s): 56cb136

Update gptx_tokenizer.py

Browse files
Files changed (1) hide show
  1. gptx_tokenizer.py +411 -29
gptx_tokenizer.py CHANGED
@@ -1,40 +1,422 @@
1
- """
2
- This module supplies `transformers`-compatible wrappers for
3
- `GPTXTokenizer`s.
4
-
5
- The tokenizers in this do not conform to the `PreTrainedTokenizer` API,
6
- but allow for better practical usage.
7
- """
8
-
9
- from typing import List
10
-
11
- try:
12
- from gptxdata.tokenization.hf_wrappers import (
13
- HFTokenizer as _HFTokenizer,
14
- SPTokenizer as _SPTokenizer,
15
- )
16
- except ImportError:
17
- from gptx_tokenizer.hf_wrappers import (
18
- HFTokenizer as _HFTokenizer,
19
- SPTokenizer as _SPTokenizer,
20
- )
21
-
22
- class HFTokenizer(_HFTokenizer):
23
- # The tokenizer is ridiculously slow without this; however, this
24
- # doesn't implement all APIs of `PreTrainedTokenizer`.
25
- def encode(self, text: str, **kwargs) -> List[int]:
26
- return_tokens = kwargs.pop('return_tokens', False)
27
- return self._tok.encode(text, return_tokens=return_tokens)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
- class SPTokenizer(_SPTokenizer):
 
 
 
 
 
 
 
 
 
 
 
31
  # `is_continuation` does not work without this, but it doesn't
32
  # implement all APIs of `PreTrainedTokenizer`.
33
  def encode(self, text: str, **kwargs) -> List[int]:
34
  return_tokens = kwargs.pop('return_tokens', False)
35
  is_continuation = kwargs.pop('is_continuation', False)
36
- return self._tok.encode(
37
  text,
38
  return_tokens=return_tokens,
39
  is_continuation=is_continuation,
40
  )
 
 
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import os
5
+ import warnings
6
+ from pathlib import Path
7
+ from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
8
+
9
+ import sentencepiece as spm
10
+ from huggingface_hub import hf_hub_download, list_repo_files
11
+ from transformers.tokenization_utils import PreTrainedTokenizer
12
+ from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
13
+
14
+ # Define special tokens used in the tokenizer
15
+ EOD_TOKEN = "<eod>"
16
+ PAD_TOKEN = "<pad>"
17
+ BOS_TOKEN = "<s>"
18
+ EOS_TOKEN = "</s>"
19
+ UNK_TOKEN = "<unk>"
20
+ REPO_ID = "openGPT-X/Teuken-7B-base-v0.4"
21
+
22
class HFGPTXTokenizer(PreTrainedTokenizer):
    """
    A custom tokenizer class that extends Hugging Face's PreTrainedTokenizer.

    It wraps a SentencePiece model (loaded from a local file or downloaded
    from the Hub repository ``REPO_ID``) and integrates with Hugging Face's
    tokenizer utilities.
    """

    # Glob used to locate a previously saved tokenizer file in a directory.
    model_file_glob = "*tokenizer.json"
    vocab_files_names = {"tokenizer_file": "tokenizer.json"}
    # Extra keyword arguments that `decode` supports; subclasses may extend.
    decode_kwargs: List[str] = []

    def _encode(self, text: str, return_tokens: bool = False, is_continuation: bool = False):
        """
        Encode a given text using the tokenizer.

        Args:
            text (str): The text to encode.
            return_tokens (bool): If True, returns token strings instead of token IDs.
            is_continuation (bool): If True, uses a continuation tokenizer (if available).

        Returns:
            List[int] or List[str]: Encoded text as a list of token IDs or token strings.
        """
        assert self.tok is not None, "No tokenizer is currently loaded"

        # NOTE(review): `self.continuation_tokenizer` is never assigned in this
        # class; `is_continuation=True` presumably relies on a subclass or
        # external setup providing it — confirm before use.
        tokenizer = self.continuation_tokenizer if is_continuation else self.tok

        if return_tokens:
            return tokenizer.encode_as_pieces(text)
        return tokenizer.encode(text)

    def create_list_of_special_tokens(self) -> List[str]:
        """
        Create a list of special tokens, including the BOS, EOS, PAD, EOD tokens,
        and 256 additional placeholder tokens.

        Returns:
            List[str]: List of special tokens.
        """
        return [self.bos_token, self.eos_token, self.pad_token, self.eod_token] + [
            f"<placeholder_tok_{i}>" for i in range(256)
        ]

    def find_tokenizer_config(self, config_path: Path, repo_id: Optional[str] = None) -> str:
        """
        Locate and download the tokenizer config file from a Hub repository.

        Args:
            config_path (Path): Local config path that was tried first (only
                used by the caller; not read here).
            repo_id (Optional[str]): Repository to search. Required.

        Returns:
            str: Local filesystem path of the downloaded config file.

        Raises:
            ValueError: If `repo_id` is not provided.
            OSError: If listing the repository or downloading the file fails.
        """
        if repo_id is None:
            raise ValueError("repo_id must be provided if config_path is not a local file")

        try:
            # List all files in the repo
            repo_files = list_repo_files(repo_id)

            # Find the tokenizer config file
            tokenizer_files = [f for f in repo_files if f.endswith('tokenizer_config.json')]
            if not tokenizer_files:
                raise FileNotFoundError(f"No tokenizer_config.json file found in repository {repo_id}")

            # Use the first tokenizer_config.json file found
            tokenizer_config_file = tokenizer_files[0]
            print(f"Found tokenizer config file: {tokenizer_config_file}")

            # Download the file
            tokenizer_config_file_or_name = hf_hub_download(repo_id=repo_id, filename=tokenizer_config_file)
            print(f"Downloaded tokenizer config file to: {tokenizer_config_file_or_name}")
            return tokenizer_config_file_or_name
        except Exception as e:
            # Chain the original exception so the root cause is preserved.
            raise OSError(f"Failed to download tokenizer model: {str(e)}") from e

    def instantiate_from_file_or_name(self, model_file_or_name: str, repo_id: Optional[str] = None):
        """
        Load the tokenizer model from a file or download it from a repository.

        Args:
            model_file_or_name (str): Path to the model file or the model name.
            repo_id (Optional[str]): Repository ID from which to download the model file.

        Returns:
            spm.SentencePieceProcessor: Loaded SentencePieceProcessor instance.

        Raises:
            ValueError: If repo_id is not provided when model_file_or_name is not a file.
            OSError: If the model file cannot be loaded or downloaded.
        """
        if not os.path.isfile(model_file_or_name):
            if repo_id is None:
                raise ValueError("repo_id must be provided if model_file_or_name is not a local file")

            try:
                # List all files in the repo
                repo_files = list_repo_files(repo_id)

                # Find the tokenizer model file (first `.model` file wins).
                tokenizer_files = [f for f in repo_files if f.endswith('.model')]
                if not tokenizer_files:
                    raise FileNotFoundError(f"No .model file found in repository {repo_id}")

                model_file = tokenizer_files[0]
                print(f"Found tokenizer model file: {model_file}")

                # Download the file
                model_file_or_name = hf_hub_download(repo_id=repo_id, filename=model_file)
                print(f"Downloaded tokenizer model to: {model_file_or_name}")
            except Exception as e:
                raise OSError(f"Failed to download tokenizer model: {str(e)}") from e

        try:
            return spm.SentencePieceProcessor(model_file=model_file_or_name)
        except Exception as e:
            raise OSError(f"Failed to load tokenizer model: {str(e)}") from e

    def __init__(
        self,
        model_path: Optional[str] = None,
        config_path: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """
        Initialize the tokenizer.

        Args:
            model_path (Optional[str]): Path to the tokenizer model file.
            config_path (Optional[str]): Path to the tokenizer configuration file.
            **kwargs: Additional keyword arguments passed to the superclass.

        This method also ensures backward compatibility by setting
        `clean_up_tokenization_spaces` to False by default.
        """
        # Prevent cleanup of tokenization spaces to maintain backward compatibility
        self.clean_up_tokenization_spaces = kwargs.setdefault("clean_up_tokenization_spaces", False)
        self.vocab = None  # lazily built by `get_vocab`
        cp_path = kwargs.get("name_or_path", ".")
        if model_path is None:
            model_path = str(Path(cp_path) / self.vocab_files_names["tokenizer_file"])
        # Load the SentencePiece model before calling super().__init__,
        # because the base class may already invoke tokenization hooks.
        self.tok = self.instantiate_from_file_or_name(model_path, repo_id=REPO_ID)

        super().__init__(**kwargs)

        # Specify special tokens which we know the value of.
        # EOD from `tok` is used as what is called EOS in HuggingFace.
        # Since there is no corresponding mapping for EOS from `tok` in
        # HuggingFace, it is treated as an additional special token.
        # Same for all other special tokens.
        self.eos_token = EOD_TOKEN
        self.bos_token = BOS_TOKEN
        self.pad_token = PAD_TOKEN

        if not self.additional_special_tokens:
            self.additional_special_tokens = [
                token
                for token in self.create_list_of_special_tokens()
                # Filter out the special tokens we added manually.
                if token
                not in [
                    self.eos_token,
                    self.bos_token,
                    self.pad_token,
                ]
            ]
        if config_path is None:
            config_path = str(Path(cp_path) / TOKENIZER_CONFIG_FILE)

        if os.path.isfile(config_path):
            self.tokenizer_config = self.load_json(Path(config_path))
        else:  # Load from repo
            self.tokenizer_config = self.load_json(Path(self.find_tokenizer_config(Path(config_path), repo_id=REPO_ID)))

    @property
    def vocab_size(self) -> int:
        """
        Get the size of the tokenizer vocabulary.

        Returns:
            int: The size of the vocabulary.
        """
        return self.tok.GetPieceSize()

    def get_vocab(self) -> Dict[str, int]:
        """
        Get the vocabulary as a dictionary mapping token strings to their IDs.

        Returns:
            Dict[str, int]: Vocabulary mapping.
        """
        if self.vocab is None:
            # Build once and cache; subsequent calls reuse the mapping.
            self.vocab = {self.tok.IdToPiece(i): i for i in range(self.vocab_size)}
        return self.vocab

    def _tokenize(self, text: str, **kwargs) -> Union[List[str], List[int]]:
        """
        Tokenize the input text.

        Args:
            text (str): Text to tokenize.
            **kwargs: Additional keyword arguments; `return_tokens` (default
                True) selects token strings over token IDs.

        Returns:
            List[str] by default (token strings), or List[int] when
            `return_tokens=False` is passed.
        """
        return_tokens = kwargs.pop("return_tokens", True)
        return self._encode(text, return_tokens=return_tokens, **kwargs)

    def _convert_token_to_id(self, token: str) -> int:
        """
        Convert a token string to its corresponding ID.

        Args:
            token (str): The token to convert.

        Returns:
            int: The token's ID.
        """
        return self.tok.PieceToId(token)

    def decode(
        self,
        token_ids: Union[List[int], List[List[int]]],
        num_threads: Optional[int] = None,
    ) -> str:
        """
        Decode a list of token IDs into a string.

        Args:
            token_ids (Union[List[int], List[List[int]]]): List of token IDs or lists of token IDs.
            num_threads (Optional[int]): Number of threads to use for decoding.

        Returns:
            str: Decoded string.
        """
        return self.tok.decode(input=token_ids, num_threads=num_threads)

    def _convert_id_to_token(self, index: int) -> str:
        """
        Convert a token ID to its corresponding token string.

        Args:
            index (int): Token ID.

        Returns:
            str: Corresponding token string.
        """
        return self.tok.IdToPiece(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Convert a list of tokens into a single string.

        Args:
            tokens (List[str]): List of token strings.

        Returns:
            str: Concatenated string of tokens.
        """
        return self.tok.DecodePieces(tokens)

    def _tok_decode(self, token_ids: List[int], **kwargs: Any) -> str:
        """
        Internal method to decode token IDs with additional arguments.

        Args:
            token_ids (List[int]): List of token IDs.
            **kwargs: Additional arguments to pass to the decode method.

        Returns:
            str: Decoded string.

        This method also issues a warning if unsupported arguments are provided.
        """
        # Only forward kwargs declared in `decode_kwargs`; warn about the rest.
        passed_kwargs = {key: value for (key, value) in kwargs.items() if key in self.decode_kwargs}
        if len(passed_kwargs) != len(kwargs):
            warnings.warn("silently ignoring some arguments to `decode` due to missing " "support from the tokenizer.")
        text = self.decode(token_ids, **passed_kwargs)
        return text

    def save_tokenizer(self, save_dir: str) -> Optional[Tuple[str]]:
        """
        Serialize the SentencePiece model into `save_dir`/tokenizer.model.

        Args:
            save_dir (str): Directory to save into; must already exist.

        Returns:
            Optional[Tuple[str]]: A 1-tuple with the written file path, or
            None if `save_dir` is not a directory (a message is printed).
        """
        if not os.path.isdir(save_dir):
            print(f"Vocabulary path ({save_dir}) should be a directory")
            return None
        out_vocab_file = os.path.join(save_dir, "tokenizer.model")

        with open(out_vocab_file, "wb") as f:
            content_spiece_model = self.tok.serialized_model_proto()
            f.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = None,
        spaces_between_special_tokens: bool = True,
        **kwargs: Any,
    ) -> str:
        """
        Decode token IDs, optionally cleaning up tokenization spaces.

        Falls back to `self.clean_up_tokenization_spaces` when
        `clean_up_tokenization_spaces` is None; cleaning is discouraged
        because it diverges from the original `GPTXTokenizer` behavior.
        """
        text = self._tok_decode(
            token_ids,
            skip_special_tokens=skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens,
            **kwargs,
        )

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            warnings.warn(
                "when cleaning up tokenization spaces, this will not behave "
                "like the original `GPTXTokenizer`. Please supply "
                "`clean_up_tokenization_spaces=False` for decoding."
            )
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> Tuple[str]:
        """
        Save the tokenizer config and model file into `save_directory`.

        Args:
            save_directory (str): Target directory.
            filename_prefix (Optional[str]): Optional prefix for the filenames.

        Returns:
            Tuple[str]: A 1-tuple with the saved tokenizer file path.
        """
        filename_prefix = filename_prefix + "-" if filename_prefix else ""
        save_directory = Path(save_directory)

        self._save_tokenizer_config(save_directory, filename_prefix)
        tokenizer_file_path = self._save_tokenizer(save_directory, filename_prefix)

        return (tokenizer_file_path,)

    def _save_tokenizer_config(
        self,
        save_directory: Path,
        filename_prefix: str,
    ) -> str:
        """Write the tokenizer config and rename it with `filename_prefix`."""
        self.save_tokenizer_config(save_directory)
        old_tokenizer_config_path = save_directory / TOKENIZER_CONFIG_FILE
        assert old_tokenizer_config_path.is_file(), "tokenizer config path changed"
        new_tokenizer_config_path = save_directory / (filename_prefix + old_tokenizer_config_path.name)
        old_tokenizer_config_path.replace(new_tokenizer_config_path)
        return str(new_tokenizer_config_path)

    def _find_tokenizer_files(self, save_directory: Path) -> List[Path]:
        """Return files in `save_directory` matching `model_file_glob`."""
        files = list(Path(save_directory).glob(self.model_file_glob))
        return files

    def _get_tokenizer_file(self, files: List[Path]):
        """Return the single saved tokenizer file, asserting exactly one exists."""
        assert files, "no saved tokenizer file found"
        assert len(files) <= 1, "cannot handle multiple saved tokenizer files"
        return files[0]

    def _save_tokenizer(
        self,
        save_directory: Path,
        filename_prefix: str,
    ) -> str:
        """Write the tokenizer model and rename it with `filename_prefix`."""
        self.save_tokenizer(str(save_directory))
        tokenizer_files = self._find_tokenizer_files(save_directory)
        old_tokenizer_file_path = self._get_tokenizer_file(tokenizer_files)
        assert old_tokenizer_file_path.is_file(), "could not access saved tokenizer file"
        new_tokenizer_file_path = save_directory / (filename_prefix + self.vocab_files_names["tokenizer_file"])
        old_tokenizer_file_path.replace(new_tokenizer_file_path)
        return str(new_tokenizer_file_path)

    def save_tokenizer_config(self, save_dir: Path) -> None:
        """Dump `self.tokenizer_config` as JSON into `save_dir`."""
        # Convert Path values to str so the config is JSON-serializable.
        for k in self.tokenizer_config:
            if isinstance(self.tokenizer_config[k], Path):
                self.tokenizer_config[k] = str(self.tokenizer_config[k])

        info_file = save_dir / "tokenizer_config.json"
        with info_file.open("w") as f:
            json.dump(self.tokenizer_config, f, indent=4)

    def load_json(self, path: Path) -> dict:
        """Load and return the JSON document at `path`."""
        with path.open("r") as f:
            return json.load(f)
408
class SPTokenizer(HFGPTXTokenizer):
    """
    SentencePiece-model variant of `HFGPTXTokenizer`.

    Uses a `.model` tokenizer file and supports `num_threads` as an extra
    `decode` keyword argument.
    """

    model_file_glob = "*tokenizer.model"
    vocab_files_names = {"tokenizer_file": "tokenizer.model"}
    decode_kwargs = ["num_threads"]

    # `is_continuation` does not work without this, but it doesn't
    # implement all APIs of `PreTrainedTokenizer`.
    def encode(self, text: str, **kwargs) -> List[int]:
        """Encode `text`, honouring `return_tokens` and `is_continuation` flags."""
        return self._encode(
            text,
            return_tokens=kwargs.pop('return_tokens', False),
            is_continuation=kwargs.pop('is_continuation', False),
        )