austindavis committed on
Commit 2016f6b · verified · 1 Parent(s): ac72f34

Update README.md

Files changed (1)
  1. README.md +3 -332
README.md CHANGED
@@ -942,339 +942,10 @@ configs:
 
 This dataset contains the hidden state tensors of the [austindavis/chessGPT2](https://huggingface.co/austindavis/chessGPT2) model recorded during forward passes over the [lichess-uci-fens dataset](https://huggingface.co/datasets/austindavis/lichess-uci-fens/viewer/201301/train) (config: "201301").
 
- The generation script was called with:
+ This dataset was generated using [prepare_hidden_states.py](https://huggingface.co/datasets/austindavis/chessgpt2-hiddenstates/blob/main/src/dataset_generation/prepare_hidden_states.py) with the following CLI arguments:
  ```sh
- python ~/git/chessold/dataset_generation/prepare_hidden_states.py record_activations data/activations-chessgpt2-fens 201301 austindavis/lichess-uci-fens train austindavis/chessgpt2
+ cd src
+ python dataset_generation/prepare_hidden_states.py record_activations data/activations-chessgpt2-fens 201301 austindavis/lichess-uci-fens train austindavis/chessgpt2
  ```
 
- The generation script is:
- ```python
- import argparse
- import os
- import re
- from typing import List, TextIO, Tuple
-
- import chess
- import datasets
- import numpy as np
- import pandas as pd
- import torch
- from tqdm.auto import tqdm
- from transformers import BatchEncoding, GPT2LMHeadModel, PreTrainedTokenizerFast
-
- from dataset_generation.command_pattern import AbstractCommand, CommandExecutor
- from modeling.chess_utils import uci_to_board
-
- # Inference only: disable gradient tracking via the public API (not torch._C)
- torch.set_grad_enabled(False)
-
- FenString = str
-
-
- def main():
-     parser = argparse.ArgumentParser()
-     executor = CommandExecutor(
-         {"record_activations": ActivationDatasetGenerator(), "push_to_hub": HubPusher()}
-     )
-
-     parser = executor.add_commands_to_argparser(parser)
-     args = parser.parse_args()
-
-     executor.execute_from_args(args, cfg=args)
-
-
- class HubPusher(AbstractCommand):
-     """Pushes hidden state vectors for given layer and phase to the Huggingface 🤗 Hub"""
-
-     split_name = "train"
-
-     def add_arguments(self, parser):
-         # fmt: off
-         parser.add_argument("data_dir", type=str, help="Directory where processed files are saved")
-         parser.add_argument("ds_repo", type=str, help="Hf 🤗 repository to which dataset will be published")
-         parser.add_argument("-l", "--layer", type=int, required=False, help="The layer to process")
-         parser.add_argument("-p", "--phase", type=int, required=False, help="The phase to process")
-         # fmt: on
-         return parser
-
-     def execute(self, cfg: argparse.Namespace):
-         assert cfg.layer is not None
-         assert cfg.phase is not None
-
-         out_dir = lambda L, P: os.path.join(cfg.data_dir, f"L{L}", f"P{P}")
-         file_path = lambda L, P: os.path.join(out_dir(L, P), f"dfs-L{L}-P{P}.csv")
-
-         csv_path = file_path(cfg.layer, cfg.phase)
-         ds = datasets.Dataset.from_csv(csv_path, num_proc=16)
-
-         def fix_pos_and_data(pos_str: str, data_str: str):
-             pos_int = int(re.search(r"\d+", pos_str).group())
-             data_str = data_str.replace("\n", " ").strip("[]")
-             np_array = None  # left as None if the row cannot be parsed
-             try:
-                 np_array = np.fromstring(data_str, sep=" ")
-             except ValueError as e:
-                 print(f"Error parsing: {e}")
-             return {"pos": pos_int, "data": np_array}
-
-         ds = ds.map(fix_pos_and_data, input_columns=["pos", "data"], num_proc=16)
-
-         config_name = f"layer-{cfg.layer:02}-phase-{cfg.phase}"
-         print(f"Pushing {config_name} to hub")
-         ds.push_to_hub(cfg.ds_repo, config_name=config_name, split=self.split_name)
-
-
- class ActivationDatasetGenerator(AbstractCommand):
-     """Exports activations in CSV format for all layers and phases."""
-
-     cfg: argparse.Namespace
-
-     MOVE_PHASES = [
-         WHITE_FROM,
-         WHITE_TO,
-         WHITE_PROMOTION,
-         # BLACK_FROM,
-         # BLACK_TO,
-         # BLACK_PROMOTION,
-         SPECIAL,
-     ] = range(4)
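-     # Chained assignment: range(4) is unpacked into the four phase names above,
-     # and MOVE_PHASES itself is bound to range(4). Each ply is tokenized as
-     # three tokens (from-square, to-square, promotion), so phases cycle i % 3.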
-     N_PHASES = len(MOVE_PHASES) - 1  # when iterating, skip the SPECIAL token
-     START_POS = -6  # only capture state of the final 6 tokens from an encoding
-
-     N_LAYERS: int = None
-
-     def add_arguments(self, parser):
-         # fmt: off
-         parser.add_argument("data_dir", type=str, help="Directory where processed files are saved.")
-         parser.add_argument("ds_config", type=str, help="Hf 🤗 dataset config name (e.g., '202301')")
-         parser.add_argument("ds_repo", type=str, help="Hf 🤗 dataset repository name (e.g., 'user/repo')")
-         parser.add_argument("ds_split", type=str, help="Hf 🤗 dataset split name (e.g. 'train')")
-         parser.add_argument("model_checkpoint", type=str, help="local or Hf 🤗 model used to generate hidden state vectors")
-         parser.add_argument("--start_pos", type=int, default=-6, help="Number of steps from the end of the token sequence to process.")
-         # fmt: on
-         return parser
-
-     def execute(self, cfg: argparse.Namespace):
-         self.cfg = cfg
-
-         ########################
-         ## Load model & tokenizer
-         ########################
-         model = GPT2LMHeadModel.from_pretrained(cfg.model_checkpoint).train(False).to(torch.device("cuda"))
-         self.N_LAYERS = len(model.transformer.h) + 1
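-         # +1 because output_hidden_states also returns the embedding-layer
-         # output in addition to each transformer block's output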
-         tokenizer: PreTrainedTokenizerFast = PreTrainedTokenizerFast.from_pretrained(cfg.model_checkpoint)
-
-         ########################
-         ## Load dataset and tokenize
-         ########################
-         dataset = (
-             datasets.load_dataset(cfg.ds_repo, name=cfg.ds_config, split=cfg.ds_split)
-             .map(
-                 # token count estimate based on 3 phases per ply
-                 lambda t: {"num_tokens": 1 + len(t.split()) * 3},
-                 input_columns="Transcript",
-                 num_proc=16,
-             )
-             .sort("num_tokens", reverse=True)
-             .filter(lambda num_tokens: num_tokens < 512, input_columns="num_tokens")
-         )
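-         # Sorting longest-first keeps similarly sized games in the same padded
-         # batch, which reduces wasted padding during the forward passes.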
-
-         ########################
-         ## Prepare paths and CSV writers
-         ########################
-         out_dir = lambda L, P: os.path.join(cfg.data_dir, f"L{L}", f"P{P}")
-         file_path = lambda L, P: os.path.join(out_dir(L, P), f"dfs-L{L}-P{P}.csv")
-
-         for L in range(self.N_LAYERS):
-             for P in range(self.N_PHASES):
-                 os.makedirs(out_dir(L, P), exist_ok=True)
-
-         writers: List[List[TextIO]] = [
-             [open(file_path(L, P), "a") for L in range(self.N_LAYERS)]
-             for P in range(self.N_PHASES)
-         ]
-
-         print_headers = True  # only once at the start
-         batch_size = 32
-         for batch_index in tqdm(range(0, len(dataset), batch_size)):
-             batch = dataset[batch_index : batch_index + batch_size]
-
-             ########################
-             ## Tokenize transcripts
-             ########################
-             encoding = tokenizer.batch_encode_plus(
-                 batch["Transcript"],
-                 padding=True,
-                 truncation=True,
-                 max_length=1024,
-                 return_special_tokens_mask=True,
-                 return_length=True,
-                 return_attention_mask=True,
-                 return_token_type_ids=True,
-                 return_tensors="pt",
-             )
-
-             ########################
-             ## Process Hidden States
-             ########################
-             hidden_states_by_game = self.transcript_to_hidden_states(encoding, model)
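-             # shape: [batch_size, self.N_LAYERS, n_pos, d_model]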
-
-             num_tokens_per_game = encoding.attention_mask.sum(dim=-1)
-             seqn_start_pos_idx = num_tokens_per_game + cfg.start_pos
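-             # cfg.start_pos is negative, so this is the index of the first of
-             # the final |start_pos| non-padding tokens in each game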
-
-             this_batch_size = len(num_tokens_per_game)  # the final batch may be smaller
-             selected = torch.stack(
-                 [
-                     hidden_states_by_game[i, :, seqn_start_pos_idx[i] : num_tokens_per_game[i]]
-                     for i in range(this_batch_size)
-                 ]
-             )
-
-             ########################
-             ## Process Board States
-             ########################
-             phase_by_pos = [[i % 3 for i in range(t)] for t in num_tokens_per_game]
-             try:
-                 fen_by_pos = [
-                     (
-                         ["rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"] * 3
-                         + [batch["Fens"][game][i // 3] for i in range(token_count - 3)]
-                     )
-                     for game, token_count in enumerate(num_tokens_per_game)
-                 ]
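-                 # Token positions 0-2 map to the standard starting position;
-                 # every later position p maps to the Fens entry (p - 3) // 3.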
-             except IndexError:
-                 # Skip the whole batch if Fens does not contain the correct
-                 # number of board states for one of its games.
-                 continue
-
-             fen_by_pos = [fen_by_pos[g][cfg.start_pos :] for g in range(this_batch_size)]
-             phase_by_pos = [phase_by_pos[g][cfg.start_pos :] for g in range(this_batch_size)]
-
-             ########################
-             ## Export/append to CSV
-             ########################
-             dfs = list(
-                 map(
-                     self.records_to_df,
-                     selected,
-                     seqn_start_pos_idx,
-                     fen_by_pos,
-                     phase_by_pos,
-                     batch["Site"],
-                 )
-             )
-             df = pd.concat(dfs)
-
-             for L in range(self.N_LAYERS):
-                 for P in range(self.N_PHASES):
-                     LP_subset: pd.DataFrame = df[(df["layer"] == L) & (df["phase"] == P)]
-                     LP_subset.to_csv(writers[P][L], index=False, header=print_headers)
-             print_headers = False
-
-     def transcript_to_hidden_states(
-         self,
-         encoding: BatchEncoding,
-         model: GPT2LMHeadModel,
-     ) -> torch.Tensor:
-         """
-         Converts a batch of UCI transcripts into a hidden state tensor of
-         shape [batch_size, n_layer + 1, n_pos, d_model] (blocks + embedding layer)
-         """
-         # forward pass
-         outputs = model(**encoding.to("cuda"), output_hidden_states=True)
-
-         # stack the per-layer tuple of hidden states into a single tensor
-         hidden_states = torch.stack(outputs.hidden_states, dim=1)
-
-         return hidden_states.to("cpu")
-
-     def hidden_states_to_records(
-         self, hidden_state_tensors: torch.Tensor, min_pos: int
-     ) -> Tuple[tuple, torch.Tensor]:
-         r"""Flattens the hidden state tensor into a list of tensors.
-         Iteration is like:
-             original[L, P] === records[P * 9 + L]
-
-         Example::
-
-             >>> indices, records = hidden_states_to_records(output)
-             >>> k = 15
-             >>> L, P = indices[k]
-             >>> print(f"L: {L}, P: {P}")
-             L: 6, P: 1
-             >>> print(sum(abs(records[k] - records[P * 9 + L])))
-             tensor(0.)
-             >>> print(sum(abs(output[L, P] - records[P * 9 + L])))
-             tensor(0.)
-         """
-         n_layer, n_pos, d_model = hidden_state_tensors.shape
-         records = hidden_state_tensors.permute(1, 0, 2).reshape(-1, d_model).unbind()
-         indices = [(L, P + min_pos) for P in range(n_pos) for L in range(n_layer)]
-         return indices, records
-
-     def trim_hidden_states(
-         self, hs: torch.Tensor, pos_start: int = -6, pos_end: int = None
-     ) -> Tuple[torch.Tensor, int]:
-         n_pos = hs.shape[1]
-         hs = hs[:, pos_start:]
-         return hs, n_pos + pos_start
-
-     def diff(self, x):
-         return x[1] - x[0]
-
-     def get_board_fens_by_pos(self, transcript: str, num_tokens: int):
-         board_stack: List[FenString] = uci_to_board(
-             transcript.lower(),
-             as_board_stack=True,
-             force=False,
-             verbose=False,
-             map_function=chess.Board.fen,
-         )
-
-         fens_by_pos: List[str] = [board_stack[0]]  # always include the 1st board
-         phases_by_pos: List[int] = [self.SPECIAL]  # first phase is the SPECIAL <|startoftext|> token
-
-         fens_by_pos += [board_stack[i // 3] for i in range(num_tokens - 1)]
-         phases_by_pos += [i % 3 for i in range(num_tokens - 1)]
-
-         return fens_by_pos, phases_by_pos
-
-     def records_to_df(
-         self,
-         hidden_states: torch.Tensor,
-         seqn_start_pos: torch.Tensor,
-         fen_by_pos,
-         phase_by_pos,
-         site,
-     ):
-         n_layer, n_pos, d_model = hidden_states.shape
-         records: tuple[torch.Tensor] = hidden_states.permute(1, 0, 2).reshape(-1, d_model).unbind()
-         indices = [(L, P + seqn_start_pos) for P in range(n_pos) for L in range(n_layer)]
-
-         df = pd.DataFrame(indices, columns=["layer", "pos"])
-         n_layer = max(df["layer"]) + 1
-         df["phase"] = [phase_by_pos[i // n_layer] for i in range(len(phase_by_pos) * n_layer)]
-         df["site"] = [site] * len(df)
-         df["fen"] = [fen_by_pos[i // n_layer] for i in range(len(fen_by_pos) * n_layer)]
-         df["data"] = [r.numpy() for r in records]
-         return df
-
-
- if __name__ == "__main__":
-     main()
- ```
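
For reference, the `push_to_hub` command in the removed script publishes each (layer, phase) pair as its own dataset config named `layer-{L:02}-phase-{P}`. A minimal sketch of loading one such config (the config name below is illustrative; check the dataset viewer for the exact list):

```python
from datasets import load_dataset

# Config names follow the script's f"layer-{layer:02}-phase-{phase}" scheme;
# "layer-06-phase-1" is an illustrative example, not a confirmed config.
ds = load_dataset(
    "austindavis/chessgpt2-hiddenstates",
    name="layer-06-phase-1",
    split="train",
)

row = ds[0]
# Each row pairs a board state ("fen"), game URL ("site"), and token position
# ("pos") with the d_model-dimensional hidden state vector stored in "data".
print(row["fen"], row["pos"], len(row["data"]))
```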