austindavis committed on
Commit
ac72f34
·
verified ·
1 Parent(s): 3b28800

Create prepare_hidden_states.py

src/dataset_generation/prepare_hidden_states.py ADDED
@@ -0,0 +1,349 @@
+ """
+ Creates probe training datasets. Output is saved as CSV files under
+ `activations/L#/P#`, where L is the layer index and P is the phase index.
+ 
+ 
+ Example usage to generate an activation dataset:
+ ```
+ python dataset_generation/prepare_hidden_states.py record_activations data/activations-chessgpt2 201301 austindavis/lichess-uci train austindavis/chessgpt2
+ ```
+ 
+ Example usage (fish shell) to push a local activation dataset to the Hugging Face Hub:
+ 
+ ```
+ for P in (seq 0 2)
+     for L in (seq 0 12)
+         echo Layer $L Phase $P;
+         python dataset_generation/prepare_hidden_states.py push_to_hub data/activations-chessgpt2 austindavis/chessgpt2-hiddenstates -l $L -p $P;
+     end
+ end
+ ```
+ 
+ """
+ 
+ import argparse
+ import os
+ import re
+ from io import TextIOWrapper
+ from typing import List, Tuple
+ 
+ import chess
+ import datasets
+ import numpy as np
+ import pandas as pd
+ import torch
+ from tqdm.auto import tqdm
+ from transformers import BatchEncoding, GPT2LMHeadModel, PreTrainedTokenizerFast
+ 
+ from dataset_generation.command_pattern import AbstractCommand, CommandExecutor
+ from modeling.chess_utils import uci_to_board
+ 
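+ # Inference only: disable gradient tracking for the whole script.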
+ torch.set_grad_enabled(False)
+ 
+ FenString = str
+ 
+ 
+ def main():
+     parser = argparse.ArgumentParser()
+     executor = CommandExecutor(
+         {"record_activations": ActivationDatasetGenerator(), "push_to_hub": HubPusher()}
+     )
+ 
+     parser = executor.add_commands_to_argparser(parser)
+ 
+     args = parser.parse_args()
+ 
+     executor.execute_from_args(args, cfg=args)
+ 
+ 
+ class HubPusher(AbstractCommand):
+     """Pushes hidden state vectors for a given layer and phase to the Hugging Face 🤗 Hub."""
+ 
+     split_name = "train"
+ 
+     def add_arguments(self, parser):
+         # fmt: off
+         parser.add_argument("data_dir", type=str, help="Directory where processed files are saved")
+         parser.add_argument("ds_repo", type=str, help="Hf 🤗 repository to which dataset will be published")
+         parser.add_argument("-l", "--layer", type=int, required=False, help="The layer to process")
+         parser.add_argument("-p", "--phase", type=int, required=False, help="The phase to process")
+         # fmt: on
+         return parser
+ 
+     def execute(self, cfg: argparse.Namespace):
+ 
+         assert cfg.layer is not None
+         assert cfg.phase is not None
+ 
+         out_dir = lambda L, P: os.path.join(cfg.data_dir, f"L{L}", f"P{P}")
+         file_path = lambda L, P: os.path.join(out_dir(L, P), f"dfs-L{L}-P{P}.csv")
+ 
+         csv_path = file_path(cfg.layer, cfg.phase)
+ 
+         ds = datasets.Dataset.from_csv(csv_path, num_proc=16)
+ 
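+         # The CSV round-trip stringified these columns: "pos" may arrive as a
+         # stringified tensor (e.g. "tensor(42)") and "data" as a bracketed,
+         # newline-wrapped float dump, so both are re-parsed into plain types.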
+         def fix_pos_and_data(pos_str: str, data_str: str):
+             pos_int = int(re.search(r"\d+", pos_str).group())
+             data_str = data_str.replace("\n", " ").strip("[]")
+             try:
+                 np_array = np.fromstring(data_str, sep=" ")
+             except ValueError as e:
+                 print(f"Error parsing: {e}")
+                 np_array = None
+             return {"pos": pos_int, "data": np_array}
+ 
+         ds = ds.map(fix_pos_and_data, input_columns=["pos", "data"], num_proc=16)
+ 
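+         # One Hub config per (layer, phase) pair, e.g. "layer-06-phase-1".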
+         config_name = f"layer-{cfg.layer:02}-phase-{cfg.phase}"
+         print(f"Pushing {config_name} to hub")
+         ds.push_to_hub(cfg.ds_repo, config_name=config_name, split=self.split_name)
+ 
+ 
+ class ActivationDatasetGenerator(AbstractCommand):
+     """Exports activations in CSV format for all layers and phases."""
+ 
+     cfg: argparse.Namespace
+ 
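+     # List-unpacking assignment: binds WHITE_FROM=0, WHITE_TO=1, WHITE_PROMOTION=2,
+     # and SPECIAL=3; MOVE_PHASES itself is bound to range(4).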
+     MOVE_PHASES = [
+         WHITE_FROM,
+         WHITE_TO,
+         WHITE_PROMOTION,
+         # BLACK_FROM,
+         # BLACK_TO,
+         # BLACK_PROMOTION,
+         SPECIAL,
+     ] = range(4)
+     N_PHASES = len(MOVE_PHASES) - 1  # When iterating, skip the SPECIAL token
+     START_POS = -6  # only capture state of the final 6 tokens from an encoding
+ 
+     N_LAYERS: int = None
+ 
+     def add_arguments(self, parser):
+         # fmt: off
+         parser.add_argument("data_dir", type=str, help="Directory where processed files are saved.")
+         parser.add_argument("ds_config", type=str, help="Hf 🤗 dataset config name (e.g., '202301')")
+         parser.add_argument("ds_repo", type=str, help="Hf 🤗 dataset repository name (e.g., 'user/repo')")
+         parser.add_argument("ds_split", type=str, help="Hf 🤗 dataset split name (e.g., 'train')")
+         parser.add_argument("model_checkpoint", type=str, help="local or Hf 🤗 model used to generate hidden state vectors")
+         parser.add_argument("--start_pos", type=int, default=-6, help="Number of steps from the end of the token sequence to process.")
+         # fmt: on
+         return parser
+ 
+     def execute(self, cfg: argparse.Namespace):
+ 
+         self.cfg = cfg
+ 
+         ########################
+         ## Load model & tokenizer
+         ########################
+ 
+         model = GPT2LMHeadModel.from_pretrained(cfg.model_checkpoint).eval().to(torch.device("cuda"))
+ 
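+         # +1 layer: output_hidden_states also returns the embedding output before block 0.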
+         self.N_LAYERS = len(model.transformer.h) + 1
+         tokenizer: PreTrainedTokenizerFast = PreTrainedTokenizerFast.from_pretrained(cfg.model_checkpoint)
+ 
+         ########################
+         ## Load dataset and tokenize
+         ########################
+ 
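+         # Sort longest games first (likely so padded batches hold similar lengths and
+         # memory pressure peaks early), then drop games over the 512-token budget.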
+         dataset = (
+             datasets.load_dataset(cfg.ds_repo, name=cfg.ds_config, split=cfg.ds_split)
+             .map(
+                 # token count estimate based on 3 phases per ply
+                 lambda t: {"num_tokens": 1 + len(t.split()) * 3},
+                 input_columns="Transcript",
+                 num_proc=16,
+             )
+             .sort("num_tokens", reverse=True)
+             .filter(lambda num_tokens: num_tokens < 512, input_columns="num_tokens")
+         )
+ 
+         ########################
+         ## Prepare paths and BufferedWriters
+         ########################
+         out_dir = lambda L, P: os.path.join(cfg.data_dir, f"L{L}", f"P{P}")
+         file_path = lambda L, P: os.path.join(out_dir(L, P), f"dfs-L{L}-P{P}.csv")
+ 
+         for L in range(self.N_LAYERS):
+             for P in range(self.N_PHASES):
+                 os.makedirs(out_dir(L, P), exist_ok=True)
+ 
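+         # writers[P][L] is an append-mode text handle for the phase-P, layer-L CSV.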
+         writers: List[List[TextIOWrapper]] = [
+             [open(file_path(L, P), "a") for L in range(self.N_LAYERS)] for P in range(self.N_PHASES)
+         ]
+ 
+         print_headers = True  # only once at the start
+         batch_size = 32
+         for batch_index in tqdm(range(0, len(dataset), batch_size)):
+ 
+             batch = dataset[batch_index : batch_index + batch_size]
+ 
+             ########################
+             ## Process Board state
+             ########################
+             # transcript = batch["Transcript"]
+             # fens = batch["Fens"]
+ 
+             encoding = tokenizer.batch_encode_plus(
+                 batch["Transcript"],
+                 padding=True,
+                 truncation=True,
+                 max_length=1024,
+                 return_special_tokens_mask=True,
+                 return_length=True,
+                 return_attention_mask=True,
+                 return_token_type_ids=True,
+                 return_tensors="pt",
+             )
+ 
+             ########################
+             ## Process Hidden States
+             ########################
+             hidden_states_by_game = self.transcript_to_hidden_states(encoding, model)
+ 
+             num_tokens_per_game = encoding.attention_mask.sum(dim=-1)
+             seqn_start_pos_idx = num_tokens_per_game + cfg.start_pos
+ 
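+             # Keep only the final |cfg.start_pos| non-padding positions of each game.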
+             n_games = len(num_tokens_per_game)  # may be < batch_size on the final batch
+             selected = torch.stack(
+                 [
+                     hidden_states_by_game[i, :, seqn_start_pos_idx[i] : num_tokens_per_game[i]]
+                     for i in range(n_games)
+                 ]
+             )
+ 
+             ########################
+             ## Process Board States
+             ########################
+ 
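+             # Each ply expands to 3 tokens (from-square, to-square, promotion), so
+             # token i aligns with FEN i // 3; the first 3 tokens map to the standard
+             # starting position.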
+             phase_by_pos = [[i % 3 for i in range(t)] for t in num_tokens_per_game]
+             try:
+                 fen_by_pos = [
+                     (
+                         ["rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"] * 3
+                         + [batch["Fens"][game][i // 3] for i in range(token_count - 3)]
+                     )
+                     for game, token_count in enumerate(num_tokens_per_game)
+                 ]
+             except IndexError:
+                 # Skip the whole batch if Fens does not contain the correct number of
+                 # board states.
+                 continue
+ 
+             fen_by_pos = [fen_by_pos[g][cfg.start_pos :] for g in range(n_games)]
+             phase_by_pos = [phase_by_pos[g][cfg.start_pos :] for g in range(n_games)]
+ 
+             ########################
+             ## Export/append to CSV
+             ########################
+             dfs = list(
+                 map(
+                     self.records_to_df,
+                     selected,
+                     seqn_start_pos_idx,
+                     fen_by_pos,
+                     phase_by_pos,
+                     batch["Site"],
+                 )
+             )
+             df = pd.concat(dfs)
+ 
+             for L in range(self.N_LAYERS):
+                 for P in range(self.N_PHASES):
+                     LP_subset: pd.DataFrame = df[(df["layer"] == L) & (df["phase"] == P)]
+                     LP_subset.to_csv(writers[P][L], index=False, header=print_headers)
+             print_headers = False
+ 
+     def transcript_to_hidden_states(
+         self,
+         encoding: BatchEncoding,
+         model: GPT2LMHeadModel,
+     ) -> torch.Tensor:
+         """
+         Converts a batch of UCI transcripts into a single hidden-state tensor of
+         shape [batch_size, n_layer, n_pos, d_model].
+         """
+         # forward pass
+         outputs = model(**encoding.to("cuda"), output_hidden_states=True)
+ 
+         # stack hidden states
+         hidden_states = outputs.hidden_states
+         hidden_states = torch.stack(hidden_states, dim=1)
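+         # shape: [batch_size, n_layer, n_pos, d_model]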
+ 
+         hidden_states = hidden_states.to("cpu")
+         return hidden_states
+ 
+     def hidden_states_to_records(
+         self, hidden_state_tensors: torch.Tensor, min_pos: int
+     ) -> Tuple[List[tuple], Tuple[torch.Tensor, ...]]:
+         r"""Flattens the hidden state tensor into a list of tensors.
+         Iteration is like:
+             original[L, P] === records[P * n_layer + L]  (n_layer == 9 in the example below)
+ 
+         Example::
+ 
+             >>> indices, records = hidden_states_to_records(output)
+             >>> k = 15
+             >>> L, P = indices[k]
+             >>> print(f"L: {L}, P: {P}")
+             L: 6, P: 1
+             >>> print(sum(abs(records[k] - records[P * 9 + L])))
+             tensor(0.)
+             >>> print(sum(abs(output[L, P] - records[P * 9 + L])))
+             tensor(0.)
+         """
+ 
+         n_layer, n_pos, d_model = hidden_state_tensors.shape
+         records = hidden_state_tensors.permute(1, 0, 2).reshape(-1, d_model).unbind()
+         indices = [(L, P + min_pos) for P in range(n_pos) for L in range(n_layer)]
+         return indices, records
+ 
+     def trim_hidden_states(
+         self, hs: torch.Tensor, pos_start: int = -6, pos_end: int = None
+     ) -> Tuple[torch.Tensor, int]:
+         n_pos = hs.shape[1]
+         hs = hs[:, pos_start:]
+         return hs, n_pos + pos_start
+ 
+     def diff(self, x):
+         return x[1] - x[0]
+ 
+     def get_board_fens_by_pos(self, transcript: str, num_tokens: int):
+ 
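+         # uci_to_board (project helper) replays the transcript; with as_board_stack=True
+         # and map_function=chess.Board.fen it yields one FEN string per position.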
+         board_stack: List[FenString] = uci_to_board(
+             transcript.lower(),
+             as_board_stack=True,
+             force=False,
+             verbose=False,
+             map_function=chess.Board.fen,
+         )
+ 
+         fens_by_pos: List[str] = [board_stack[0]]  # always include 1st board
+         phases_by_pos: List[int] = [self.SPECIAL]  # first phase is the SPECIAL <|startoftext|> token
+ 
+         fens_by_pos += [board_stack[i // 3] for i in range(num_tokens - 1)]
+         phases_by_pos += [i % 3 for i in range(num_tokens - 1)]
+ 
+         return fens_by_pos, phases_by_pos
+ 
+     def records_to_df(
+         self,
+         hidden_states: torch.Tensor,
+         seqn_start_pos: torch.Tensor,
+         fen_by_pos: List[FenString],
+         phase_by_pos: List[int],
+         site: str,
+     ):
+ 
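+         # Flatten [n_layer, n_pos, d_model] into n_pos * n_layer rows, position-major:
+         # row k holds layer k % n_layer at position k // n_layer.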
+         n_layer, n_pos, d_model = hidden_states.shape
+         records: Tuple[torch.Tensor, ...] = hidden_states.permute(1, 0, 2).reshape(-1, d_model).unbind()
+         indices = [(L, pos + seqn_start_pos) for pos in range(n_pos) for L in range(n_layer)]
+ 
+         df = pd.DataFrame(indices, columns=["layer", "pos"])
+         n_layer = max(df["layer"]) + 1
+         df["phase"] = [phase_by_pos[i // n_layer] for i in range(len(phase_by_pos) * n_layer)]
+         df["site"] = [site] * len(df)
+         df["fen"] = [fen_by_pos[i // n_layer] for i in range(len(fen_by_pos) * n_layer)]
+         df["data"] = [r.numpy() for r in records]
+         return df
+ 
+ 
+ if __name__ == "__main__":
+     main()