Upload 8 files
- .gitattributes +1 -0
- config.json +9 -0
- merges.txt +0 -0
- model.safetensors +3 -0
- readme.md +202 -0
- tokenizer.json +0 -0
- tokenizer_config.json +20 -0
- train_gpt2_1.py +553 -0
- vocab.json +0 -0
.gitattributes
ADDED
@@ -0,0 +1 @@
+model.safetensors filter=lfs diff=lfs merge=lfs -text
config.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "block_size": 768,
+  "dropout": 0.1,
+  "model_type": "custom_gpt",
+  "n_embd": 768,
+  "n_head": 8,
+  "n_layer": 8,
+  "vocab_size": 50304
+}
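Note that `vocab_size` here is 50304, not the GPT-2 tokenizer's actual 50257 tokens: the vocabulary is padded up to a multiple of 128 (128 × 393 = 50304), a common trick for better GPU throughput. A minimal sketch of reading this file into the `GPTConfig` dataclass defined in `readme.md` and `train_gpt2_1.py` below:

```python
# A minimal sketch; GPTConfig mirrors the dataclass defined later in this commit.
import json
from dataclasses import dataclass

@dataclass
class GPTConfig:
    block_size: int = 768
    vocab_size: int = 50257
    n_layer: int = 8
    n_head: int = 8
    n_embd: int = 768
    dropout: float = 0.1
    model_type: str = "custom_gpt"

with open("config.json") as f:
    config = GPTConfig(**json.load(f))

# 50257 tokens padded up to the next multiple of 128
assert config.vocab_size == 50304
```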
merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7ef5eb035ca243528a813050affa44572e2f1e0d6e2445639106d7e4be5640e
+size 383721432
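This is a Git LFS pointer, not the weights themselves; the actual file is 383,721,432 bytes. As a quick sanity check (a sketch of the arithmetic, assuming float32 storage with the tied `wte`/`lm_head` weights stored once), that size matches the parameter count implied by `config.json`:

```python
# Parameter count implied by config.json (n_layer=8, n_head=8, n_embd=768,
# block_size=768, vocab_size=50304), counting each tied tensor once.
n_embd, n_layer, vocab, block = 768, 8, 50304, 768

wte = vocab * n_embd                        # token embeddings (tied with lm_head)
wpe = block * n_embd                        # position embeddings
per_block = (
    2 * 2 * n_embd                          # ln_1 and ln_2 (weight + bias each)
    + n_embd * 3 * n_embd + 3 * n_embd      # attn.c_attn
    + n_embd * n_embd + n_embd              # attn.c_proj
    + n_embd * 4 * n_embd + 4 * n_embd      # mlp.c_fc
    + 4 * n_embd * n_embd + n_embd          # mlp.c_proj
)
ln_f = 2 * n_embd
total = wte + wpe + n_layer * per_block + ln_f
print(total)        # 95,927,808 parameters
print(total * 4)    # 383,711,232 bytes in fp32; + ~10 KB safetensors header ≈ 383,721,432
```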
readme.md
ADDED
@@ -0,0 +1,202 @@
+# Leap0 Model
+
+## Model Description
+
+This is the Leap0 model, designed for text generation. It uses the GPT-2 tokenizer and a GPT-2-style architecture, but is trained from scratch on the TinyStories dataset.
+
+## Model Architecture
+
+- **Model Type**: GPT-2 style (`custom_gpt`)
+- **Number of Layers**: 8
+- **Number of Heads**: 8
+- **Embedding Size**: 768
+- **Block Size**: 768
+- **Vocabulary Size**: 50304 (GPT-2's 50257 tokens padded up to a multiple of 128)
+- **Dropout Rate**: 0.1
+- **Attention Mechanism**: Causal Self-Attention
+- **Encoding**: GPT-2 Tokenizer
+
+## Training Details
+
+- **Dataset**: TinyStories
+
+## How to Use
+
+```python
+# Change the input text below to your desired prompt string.
+import json
+from dataclasses import dataclass
+
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+from transformers import GPT2Tokenizer
+from safetensors.torch import load_file
+
+# Define the CausalSelfAttention class
+class CausalSelfAttention(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        assert config.n_embd % config.n_head == 0
+        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
+        self.c_proj = nn.Linear(config.n_embd, config.n_embd)
+        self.c_proj.NANOGPT_SCALE_INIT = 1
+        self.n_head = config.n_head
+        self.n_embd = config.n_embd
+
+    def forward(self, x):
+        B, T, C = x.size()
+        qkv = self.c_attn(x)
+        q, k, v = qkv.split(self.n_embd, dim=2)
+        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
+        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
+        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
+        y = F.scaled_dot_product_attention(q, k, v, is_causal=True)
+        y = y.transpose(1, 2).contiguous().view(B, T, C)
+        y = self.c_proj(y)
+        return y
+
+# Define the MLP class
+class MLP(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd)
+        self.gelu = nn.GELU(approximate='tanh')
+        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd)
+        self.c_proj.NANOGPT_SCALE_INIT = 1
+
+    def forward(self, x):
+        x = self.c_fc(x)
+        x = self.gelu(x)
+        x = self.c_proj(x)
+        return x
+
+# Define the Block class
+class Block(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.ln_1 = nn.LayerNorm(config.n_embd)
+        self.attn = CausalSelfAttention(config)
+        self.ln_2 = nn.LayerNorm(config.n_embd)
+        self.mlp = MLP(config)
+
+    def forward(self, x):
+        x = x + self.attn(self.ln_1(x))
+        x = x + self.mlp(self.ln_2(x))
+        return x
+
+# Define the GPTConfig class (config.json overrides these defaults)
+@dataclass
+class GPTConfig:
+    block_size: int = 768
+    vocab_size: int = 50257
+    n_layer: int = 8
+    n_head: int = 8
+    n_embd: int = 768
+    dropout: float = 0.1
+    model_type: str = "custom_gpt"
+
+    def to_dict(self):
+        return self.__dict__
+
+    @classmethod
+    def from_dict(cls, config_dict):
+        return cls(**config_dict)
+
+# Define the GPT class
+class GPT(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.config = config
+
+        self.transformer = nn.ModuleDict(dict(
+            wte=nn.Embedding(config.vocab_size, config.n_embd),
+            wpe=nn.Embedding(config.block_size, config.n_embd),
+            h=nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
+            ln_f=nn.LayerNorm(config.n_embd),
+        ))
+        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+
+        # Weight sharing scheme
+        self.transformer.wte.weight = self.lm_head.weight
+
+        # Initialize parameters
+        self.apply(self._init_weights)
+
+    def _init_weights(self, module):
+        if isinstance(module, nn.Linear):
+            std = 0.02
+            if hasattr(module, 'NANOGPT_SCALE_INIT'):
+                std *= (2 * self.config.n_layer) ** -0.5
+            torch.nn.init.normal_(module.weight, mean=0.0, std=std)
+            if module.bias is not None:
+                torch.nn.init.zeros_(module.bias)
+        elif isinstance(module, nn.Embedding):
+            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+
+    def forward(self, idx, targets=None):
+        B, T = idx.size()
+        assert T <= self.config.block_size, f"Cannot forward sequence of length {T}, block size is only {self.config.block_size}"
+        pos = torch.arange(0, T, dtype=torch.long, device=idx.device)
+        pos_emb = self.transformer.wpe(pos)
+        tok_emb = self.transformer.wte(idx)
+        x = tok_emb + pos_emb
+        for block in self.transformer.h:
+            x = block(x)
+        x = self.transformer.ln_f(x)
+        logits = self.lm_head(x)
+        loss = None
+        if targets is not None:
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
+        return logits, loss
+
+# Manually specify the paths to the config and model files (adjust to your setup)
+config_path = "/home/nll-workstation/Desktop/config.json"
+model_path = "/home/nll-workstation/Desktop/model.safetensors"
+
+# Load the configuration from the specified JSON file
+with open(config_path, "r") as f:
+    config_dict = json.load(f)
+config = GPTConfig.from_dict(config_dict)
+
+# Load the model weights from the specified .safetensors file
+tensors = load_file(model_path)
+
+# Instantiate the model with the loaded config
+model = GPT(config)
+
+# Load the state dict (weights) into the model
+model.load_state_dict(tensors, strict=False)
+
+# Set the model to evaluation mode
+model.eval()
+
+# Load the tokenizer (the same tokenizer used during training)
+tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+
+# Prepare input text and tokenize it
+input_text = "once upon a time in the village of "
+input_ids = tokenizer.encode(input_text, return_tensors="pt")
+
+# Run a single forward pass through the model (extract logits from the tuple)
+logits, _ = model(input_ids)
+
+# Take the argmax at every position: this yields the model's next-token
+# prediction for each prefix of the input, not an autoregressive continuation
+predicted_ids = torch.argmax(logits, dim=-1)
+
+# Convert predicted token IDs back to text
+output_text = tokenizer.decode(predicted_ids[0], skip_special_tokens=True)
+
+# Print input and output
+print("Input Text:", input_text)
+print("Output Text:", output_text)
+```
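Note that the snippet above decodes the argmax prediction at every input position, so its output is the model's one-step next-token guesses rather than a generated continuation. A minimal autoregressive top-k sampling loop (a sketch mirroring the sampler in `train_gpt2_1.py` below; it assumes `model` and `tokenizer` are set up as in the snippet above) would look like:

```python
# A sketch of autoregressive generation; assumes `model` and `tokenizer`
# from the usage snippet above are already constructed.
import torch
import torch.nn.functional as F

def generate(model, tokenizer, prompt, max_new_tokens=50, top_k=50):
    idx = tokenizer.encode(prompt, return_tensors="pt")
    for _ in range(max_new_tokens):
        # crop the context to the model's block size if needed
        idx_cond = idx[:, -model.config.block_size:]
        with torch.no_grad():
            logits, _ = model(idx_cond)
        logits = logits[:, -1, :]              # logits at the last position
        probs = F.softmax(logits, dim=-1)
        topk_probs, topk_idx = torch.topk(probs, top_k, dim=-1)
        ix = torch.multinomial(topk_probs, 1)  # sample within the top-k
        next_token = torch.gather(topk_idx, -1, ix)
        idx = torch.cat((idx, next_token), dim=1)
    return tokenizer.decode(idx[0], skip_special_tokens=True)

print(generate(model, tokenizer, "once upon a time in the village of"))
```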
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,20 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
+  "model_max_length": 1024,
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
train_gpt2_1.py
ADDED
@@ -0,0 +1,553 @@
+import os
+import math
+import time
+import inspect
+from dataclasses import dataclass
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+from hellaswag import render_example, iterate_examples
+import pandas as pd
+import pyarrow.parquet as pq
+
+# -----------------------------------------------------------------------------
+
+class CausalSelfAttention(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        assert config.n_embd % config.n_head == 0
+        # key, query, value projections for all heads, but in a batch
+        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
+        # output projection
+        self.c_proj = nn.Linear(config.n_embd, config.n_embd)
+        self.c_proj.NANOGPT_SCALE_INIT = 1
+        # regularization
+        self.n_head = config.n_head
+        self.n_embd = config.n_embd
+
+    def forward(self, x):
+        B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
+        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
+        # nh is "number of heads", hs is "head size", and C (number of channels) = nh * hs
+        # e.g. in GPT-2 (124M), n_head=12, hs=64, so nh*hs=C=768 channels in the Transformer
+        qkv = self.c_attn(x)
+        q, k, v = qkv.split(self.n_embd, dim=2)
+        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+        y = F.scaled_dot_product_attention(q, k, v, is_causal=True) # flash attention
+        y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
+        # output projection
+        y = self.c_proj(y)
+        return y
+
+class MLP(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd)
+        self.gelu = nn.GELU(approximate='tanh')
+        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd)
+        self.c_proj.NANOGPT_SCALE_INIT = 1
+
+    def forward(self, x):
+        x = self.c_fc(x)
+        x = self.gelu(x)
+        x = self.c_proj(x)
+        return x
+
+class Block(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.ln_1 = nn.LayerNorm(config.n_embd)
+        self.attn = CausalSelfAttention(config)
+        self.ln_2 = nn.LayerNorm(config.n_embd)
+        self.mlp = MLP(config)
+
+    def forward(self, x):
+        x = x + self.attn(self.ln_1(x))
+        x = x + self.mlp(self.ln_2(x))
+        return x
+
+@dataclass
+class GPTConfig:
+    block_size: int = 768 # max sequence length
+    vocab_size: int = 50257 # number of tokens: 50,000 BPE merges + 256 byte tokens + 1 <|endoftext|> token
+    n_layer: int = 8 # number of layers
+    n_head: int = 8 # number of heads
+    n_embd: int = 768 # embedding dimension
+    dropout: float = 0.1
+    model_type: str = "custom_gpt"
+
+class GPT(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.config = config
+
+        self.transformer = nn.ModuleDict(dict(
+            wte = nn.Embedding(config.vocab_size, config.n_embd),
+            wpe = nn.Embedding(config.block_size, config.n_embd),
+            h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
+            ln_f = nn.LayerNorm(config.n_embd),
+        ))
+        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+
+        # weight sharing scheme
+        self.transformer.wte.weight = self.lm_head.weight
+
+        # init params
+        self.apply(self._init_weights)
+
+    def _init_weights(self, module):
+        if isinstance(module, nn.Linear):
+            std = 0.02
+            if hasattr(module, 'NANOGPT_SCALE_INIT'):
+                std *= (2 * self.config.n_layer) ** -0.5
+            torch.nn.init.normal_(module.weight, mean=0.0, std=std)
+            if module.bias is not None:
+                torch.nn.init.zeros_(module.bias)
+        elif isinstance(module, nn.Embedding):
+            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+
+    def forward(self, idx, targets=None):
+        # idx is of shape (B, T)
+        B, T = idx.size()
+        assert T <= self.config.block_size, f"Cannot forward sequence of length {T}, block size is only {self.config.block_size}"
+        # forward the token and position embeddings
+        pos = torch.arange(0, T, dtype=torch.long, device=idx.device) # shape (T)
+        pos_emb = self.transformer.wpe(pos) # position embeddings of shape (T, n_embd)
+        tok_emb = self.transformer.wte(idx) # token embeddings of shape (B, T, n_embd)
+        x = tok_emb + pos_emb
+        # forward the blocks of the transformer
+        for block in self.transformer.h:
+            x = block(x)
+        # forward the final layernorm and the classifier
+        x = self.transformer.ln_f(x)
+        logits = self.lm_head(x) # (B, T, vocab_size)
+        loss = None
+        if targets is not None:
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
+        return logits, loss
+
+    @classmethod
+    def from_pretrained(cls, model_type):
+        """Loads pretrained GPT-2 model weights from huggingface"""
+        assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}
+        from transformers import GPT2LMHeadModel
+        print("loading weights from pretrained gpt: %s" % model_type)
+
+        # n_layer, n_head and n_embd are determined from model_type
+        config_args = {
+            'gpt2': dict(n_layer=12, n_head=12, n_embd=768), # 124M params
+            'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params
+            'gpt2-large': dict(n_layer=36, n_head=20, n_embd=1280), # 774M params
+            'gpt2-xl': dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params
+        }[model_type]
+        config_args['vocab_size'] = 50257 # always 50257 for GPT model checkpoints
+        config_args['block_size'] = 1024 # always 1024 for GPT model checkpoints
+        # create a from-scratch initialized minGPT model
+        config = GPTConfig(**config_args)
+        model = GPT(config)
+        sd = model.state_dict()
+        sd_keys = sd.keys()
+        sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')] # discard this mask / buffer, not a param
+
+        # init a huggingface/transformers model
+        model_hf = GPT2LMHeadModel.from_pretrained(model_type)
+        sd_hf = model_hf.state_dict()
+
+        # copy while ensuring all of the parameters are aligned and match in names and shapes
+        sd_keys_hf = sd_hf.keys()
+        sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')] # ignore these, just a buffer
+        sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')] # same, just the mask (buffer)
+        transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight']
+        # basically the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear
+        # this means that we have to transpose these weights when we import them
+        assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}"
+        for k in sd_keys_hf:
+            if any(k.endswith(w) for w in transposed):
+                # special treatment for the Conv1D weights we need to transpose
+                assert sd_hf[k].shape[::-1] == sd[k].shape
+                with torch.no_grad():
+                    sd[k].copy_(sd_hf[k].t())
+            else:
+                # vanilla copy over the other parameters
+                assert sd_hf[k].shape == sd[k].shape
+                with torch.no_grad():
+                    sd[k].copy_(sd_hf[k])
+
+        return model
+
+    def configure_optimizers(self, weight_decay, learning_rate, device_type):
+        # start with all of the candidate parameters (that require grad)
+        param_dict = {pn: p for pn, p in self.named_parameters()}
+        param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}
+        # create optim groups. any parameter that is at least 2D will be weight decayed, otherwise not.
+        # i.e. all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.
+        decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]
+        nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]
+        optim_groups = [
+            {'params': decay_params, 'weight_decay': weight_decay},
+            {'params': nodecay_params, 'weight_decay': 0.0}
+        ]
+        num_decay_params = sum(p.numel() for p in decay_params)
+        num_nodecay_params = sum(p.numel() for p in nodecay_params)
+        if master_process:
+            print(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters")
+            print(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters")
+        # create AdamW optimizer and use the fused version if it is available
+        fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters
+        use_fused = fused_available and device_type == "cuda"
+        if master_process:
+            print(f"using fused AdamW: {use_fused}")
+        optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=(0.9, 0.95), eps=1e-8, fused=use_fused)
+        return optimizer
+
+# -----------------------------------------------------------------------------
+import tiktoken
+import numpy as np
+from transformers import GPT2Tokenizer
+
+# Initialize the tokenizer (a GPT-2 tokenizer, matching the training data)
+tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+
+def load_tokens(filename, max_length=1024):
+    # read the Parquet file into a DataFrame
+    df = pd.read_parquet(filename)
+
+    # the text data is expected in a column named 'text'
+    if 'text' in df.columns:
+        # tokenize the text data with truncation
+        tokens = df['text'].apply(lambda x: tokenizer.encode(x, add_special_tokens=True, max_length=max_length, truncation=True))
+        # flatten the list of lists and convert to a PyTorch tensor
+        tokens_flat = [token for sublist in tokens for token in sublist]
+        ptt = torch.tensor(tokens_flat, dtype=torch.long)
+        return ptt
+    else:
+        raise ValueError(f"'text' column not found in {filename}")
+
+class DataLoaderLite:
+    def __init__(self, B, T, process_rank, num_processes, split):
+        self.B = B
+        self.T = T
+        self.process_rank = process_rank
+        self.num_processes = num_processes
+        assert split in {'train', 'val'}
+
+        # get the shard filenames
+        data_root = "GPT2-TS/ts"
+        shards = os.listdir(data_root)
+        shards = [s for s in shards if split in s]
+        shards = sorted(shards)
+        shards = [os.path.join(data_root, s) for s in shards]
+        self.shards = shards
+        assert len(shards) > 0, f"no shards found for split {split}"
+        if master_process:
+            print(f"found {len(shards)} shards for split {split}")
+        self.reset()
+
+    def reset(self):
+        # state, init at shard zero
+        self.current_shard = 0
+        self.tokens = load_tokens(self.shards[self.current_shard])
+        self.current_position = self.B * self.T * self.process_rank
+
+    def next_batch(self):
+        B, T = self.B, self.T
+        buf = self.tokens[self.current_position : self.current_position+B*T+1]
+        x = (buf[:-1]).view(B, T) # inputs
+        y = (buf[1:]).view(B, T) # targets
+        # advance the position in the tensor
+        self.current_position += B * T * self.num_processes
+        # if loading the next batch would be out of bounds, advance to next shard
+        if self.current_position + (B * T * self.num_processes + 1) > len(self.tokens):
+            self.current_shard = (self.current_shard + 1) % len(self.shards)
+            self.tokens = load_tokens(self.shards[self.current_shard])
+            self.current_position = B * T * self.process_rank
+        return x, y
+
+# -----------------------------------------------------------------------------
+# helper function for HellaSwag eval
+# takes tokens, mask, and logits, returns the index of the completion with the lowest loss
+
+def get_most_likely_row(tokens, mask, logits):
+    # evaluate the autoregressive loss at all positions
+    shift_logits = (logits[..., :-1, :]).contiguous()
+    shift_tokens = (tokens[..., 1:]).contiguous()
+    flat_shift_logits = shift_logits.view(-1, shift_logits.size(-1))
+    flat_shift_tokens = shift_tokens.view(-1)
+    shift_losses = F.cross_entropy(flat_shift_logits, flat_shift_tokens, reduction='none')
+    shift_losses = shift_losses.view(tokens.size(0), -1)
+    # now get the average loss just for the completion region (where mask == 1), in each row
+    shift_mask = (mask[..., 1:]).contiguous() # we must shift the mask, so we start at the last prompt token
+    masked_shift_losses = shift_losses * shift_mask
+    # sum and divide by the number of 1s in the mask
+    sum_loss = masked_shift_losses.sum(dim=1)
+    avg_loss = sum_loss / shift_mask.sum(dim=1)
+    # now we have a loss for each of the 4 completions
+    # the one with the lowest loss should be the most likely
+    pred_norm = avg_loss.argmin().item()
+    return pred_norm
+
+# -----------------------------------------------------------------------------
+# simple launch:
+# python train_gpt2.py
+# DDP launch for e.g. 8 GPUs:
+# torchrun --standalone --nproc_per_node=8 train_gpt2.py
+
+torch.set_num_threads(20) # set this to the number of CPU cores available
+torch.set_num_interop_threads(20) # helps with parallelism in inter-op workloads
+
+# run the training loop
+from torch.distributed import init_process_group, destroy_process_group
+from torch.nn.parallel import DistributedDataParallel as DDP
+import torch.distributed as dist
+
+# set up DDP (distributed data parallel).
+# the torchrun command sets the env variables RANK, LOCAL_RANK, and WORLD_SIZE
+ddp = int(os.environ.get('RANK', -1)) != -1 # is this a ddp run?
+if ddp:
+    # use of DDP atm demands CUDA, we set the device appropriately according to rank
+    assert torch.cuda.is_available(), "for now i think we need CUDA for DDP"
+    init_process_group(backend='nccl')
+    ddp_rank = int(os.environ['RANK'])
+    ddp_local_rank = int(os.environ['LOCAL_RANK'])
+    ddp_world_size = int(os.environ['WORLD_SIZE'])
+    device = f'cuda:{ddp_local_rank}'
+    torch.cuda.set_device(device)
+    master_process = ddp_rank == 0 # this process will do logging, checkpointing etc.
+else:
+    # vanilla, non-DDP run
+    ddp_rank = 0
+    ddp_local_rank = 0
+    ddp_world_size = 1
+    master_process = True
+    # attempt to autodetect the device
+    device = "cpu"
+    if torch.cuda.is_available():
+        device = "cuda"
+    elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
+        device = "mps"
+    print(f"using device: {device}")
+
+# added after video, pytorch can be serious about its device vs. device_type distinction
+device_type = "cuda" if device.startswith("cuda") else "cpu"
+
+torch.manual_seed(1337)
+if torch.cuda.is_available():
+    torch.cuda.manual_seed(1337)
+
+enc = tiktoken.get_encoding("gpt2")
+
+total_batch_size = 65536 # 2**16 tokens per optimizer step
+B = 32 # micro batch size
+T = 512 # sequence length
+assert total_batch_size % (B * T * ddp_world_size) == 0, "make sure total_batch_size is divisible by B * T * ddp_world_size"
+grad_accum_steps = total_batch_size // (B * T * ddp_world_size)
+if master_process:
+    print(f"total desired batch size: {total_batch_size}")
+    print(f"=> calculated gradient accumulation steps: {grad_accum_steps}")
+
+train_loader = DataLoaderLite(B=B, T=T, process_rank=ddp_rank, num_processes=ddp_world_size, split="train")
+val_loader = DataLoaderLite(B=B, T=T, process_rank=ddp_rank, num_processes=ddp_world_size, split="val")
+
+torch.set_float32_matmul_precision('high')
+
+# create the model (vocab padded from 50257 up to the "nice" number 50304)
+model = GPT(GPTConfig(vocab_size=50304))
+print(f"Number of layers in the model: {model.config.n_layer}")
+# or count the blocks directly
+print(f"Number of layers (blocks): {len(model.transformer.h)}")
+
+# model = GPT.from_pretrained("gpt2") # or init from OpenAI GPT-2
+model.to(device)
+use_compile = False # torch.compile interferes with HellaSwag eval and generation. TODO fix
+if use_compile:
+    model = torch.compile(model)
+if ddp:
+    model = DDP(model, device_ids=[ddp_local_rank])
+raw_model = model.module if ddp else model # always contains the "raw" unwrapped model
+
+max_lr = 6e-4
+min_lr = max_lr * 0.1
+warmup_steps = 715
+max_steps = 28228 # total number of training steps
+def get_lr(it):
+    # 1) linear warmup for warmup_steps steps
+    if it < warmup_steps:
+        return max_lr * (it+1) / warmup_steps
+    # 2) if it > max_steps, return the minimum learning rate
+    if it > max_steps:
+        return min_lr
+    # 3) in between, use cosine decay down to the minimum learning rate
+    decay_ratio = (it - warmup_steps) / (max_steps - warmup_steps)
+    assert 0 <= decay_ratio <= 1
+    coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio)) # coeff starts at 1 and goes to 0
+    return min_lr + coeff * (max_lr - min_lr)
+
+# optimize!
+optimizer = raw_model.configure_optimizers(weight_decay=0.1, learning_rate=6e-4, device_type=device_type)
+
+# create the log directory we will write checkpoints and logs to
+log_dir = "log"
+os.makedirs(log_dir, exist_ok=True)
+log_file = os.path.join(log_dir, "log.txt")
+with open(log_file, "w") as f: # open for writing to clear the file
+    pass
+
+for step in range(max_steps):
+    t0 = time.time()
+    last_step = (step == max_steps - 1)
+
+    # once in a while evaluate our validation loss
+    if step % 250 == 0 or last_step:
+        model.eval()
+        val_loader.reset()
+        with torch.no_grad():
+            val_loss_accum = 0.0
+            val_loss_steps = 20
+            for _ in range(val_loss_steps):
+                x, y = val_loader.next_batch()
+                x, y = x.to(device), y.to(device)
+                with torch.autocast(device_type=device_type, dtype=torch.bfloat16):
+                    logits, loss = model(x, y)
+                loss = loss / val_loss_steps
+                val_loss_accum += loss.detach()
+        if ddp:
+            dist.all_reduce(val_loss_accum, op=dist.ReduceOp.AVG)
+        if master_process:
+            print(f"validation loss: {val_loss_accum.item():.4f}")
+            with open(log_file, "a") as f:
+                f.write(f"{step} val {val_loss_accum.item():.4f}\n")
+            if step > 0 and (step % 3000 == 0 or last_step):
+                # optionally write model checkpoints
+                # checkpoint_path = os.path.join(log_dir, f"model_{step:05d}.pt")
+                # checkpoint = {
+                #     'model': raw_model.state_dict(),
+                #     'config': raw_model.config,
+                #     'step': step,
+                #     'val_loss': val_loss_accum.item()
+                # }
+                # # you might also want to add optimizer.state_dict() and
+                # # rng seeds etc., if you wanted to more exactly resume training
+                # torch.save(checkpoint, checkpoint_path)
+                model_path = os.path.join(log_dir, f"model_full_{step:05d}.pt")
+                torch.save(raw_model, model_path)
+
+    # once in a while evaluate hellaswag
+    if (step % 250 == 0 or last_step) and (not use_compile):
+        num_correct_norm = 0
+        num_total = 0
+        for i, example in enumerate(iterate_examples("val")):
+            # only process examples where i % ddp_world_size == ddp_rank
+            if i % ddp_world_size != ddp_rank:
+                continue
+            # render the example into tokens and labels
+            _, tokens, mask, label = render_example(example)
+            tokens = tokens.to(device)
+            mask = mask.to(device)
+            # get the logits
+            with torch.no_grad():
+                with torch.autocast(device_type=device_type, dtype=torch.bfloat16):
+                    logits, loss = model(tokens)
+                pred_norm = get_most_likely_row(tokens, mask, logits)
+            num_total += 1
+            num_correct_norm += int(pred_norm == label)
+        # reduce the stats across all processes
+        if ddp:
+            num_total = torch.tensor(num_total, dtype=torch.long, device=device)
+            num_correct_norm = torch.tensor(num_correct_norm, dtype=torch.long, device=device)
+            dist.all_reduce(num_total, op=dist.ReduceOp.SUM)
+            dist.all_reduce(num_correct_norm, op=dist.ReduceOp.SUM)
+            num_total = num_total.item()
+            num_correct_norm = num_correct_norm.item()
+        acc_norm = num_correct_norm / num_total
+        if master_process:
+            print(f"HellaSwag accuracy: {num_correct_norm}/{num_total}={acc_norm:.4f}")
+            with open(log_file, "a") as f:
+                f.write(f"{step} hella {acc_norm:.4f}\n")
+
+    # once in a while generate from the model (except step 0, which is noise)
+    if ((step > 0 and step % 250 == 0) or last_step) and (not use_compile):
+        model.eval()
+        num_return_sequences = 4
+        max_length = 32
+        tokens = enc.encode("Hello, I'm a language model,")
+        tokens = torch.tensor(tokens, dtype=torch.long)
+        tokens = tokens.unsqueeze(0).repeat(num_return_sequences, 1)
+        xgen = tokens.to(device)
+        sample_rng = torch.Generator(device=device)
+        sample_rng.manual_seed(42 + ddp_rank)
+        while xgen.size(1) < max_length:
+            # forward the model to get the logits
+            with torch.no_grad():
+                with torch.autocast(device_type=device_type, dtype=torch.bfloat16):
+                    logits, loss = model(xgen) # (B, T, vocab_size)
+                # take the logits at the last position
+                logits = logits[:, -1, :] # (B, vocab_size)
+                # get the probabilities
+                probs = F.softmax(logits, dim=-1)
+                # do top-k sampling of 50 (huggingface pipeline default)
+                # topk_probs here becomes (4, 50), topk_indices is (4, 50)
+                topk_probs, topk_indices = torch.topk(probs, 50, dim=-1)
+                # select a token from the top-k probabilities
+                # note: multinomial does not demand the input to sum to 1
+                ix = torch.multinomial(topk_probs, 1, generator=sample_rng) # (B, 1)
+                # gather the corresponding indices
+                xcol = torch.gather(topk_indices, -1, ix) # (B, 1)
+                # append to the sequence
+                xgen = torch.cat((xgen, xcol), dim=1)
+        # print the generated text
+        for i in range(num_return_sequences):
+            tokens = xgen[i, :max_length].tolist()
+            decoded = enc.decode(tokens)
+            print(f"rank {ddp_rank} sample {i}: {decoded}")
+
+    # do one step of the optimization
+    model.train()
+    optimizer.zero_grad()
+    loss_accum = 0.0
+    for micro_step in range(grad_accum_steps):
+        x, y = train_loader.next_batch()
+        x, y = x.to(device), y.to(device)
+        # added after video, this field is also used by the forward pass.
+        if ddp:
+            model.require_backward_grad_sync = (micro_step == grad_accum_steps - 1)
+        with torch.autocast(device_type=device_type, dtype=torch.bfloat16):
+            logits, loss = model(x, y)
+        # we have to scale the loss to account for gradient accumulation,
+        # because the gradients just add on each successive backward().
+        # addition of gradients corresponds to a SUM in the objective, but
+        # instead of a SUM we want MEAN. scale the loss here so it comes out right
+        loss = loss / grad_accum_steps
+        loss_accum += loss.detach()
+        loss.backward()
+    if ddp:
+        dist.all_reduce(loss_accum, op=dist.ReduceOp.AVG)
+    norm = torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
+    # determine and set the learning rate for this iteration
+    lr = get_lr(step)
+    for param_group in optimizer.param_groups:
+        param_group['lr'] = lr
+    optimizer.step()
+    if device_type == "cuda":
+        torch.cuda.synchronize() # wait for the GPU to finish work
+    t1 = time.time()
+    dt = t1 - t0 # time difference in seconds
+    tokens_processed = train_loader.B * train_loader.T * grad_accum_steps * ddp_world_size
+    tokens_per_sec = tokens_processed / dt
+    if master_process:
+        print(f"step {step:5d} | loss: {loss_accum.item():.6f} | lr {lr:.4e} | norm: {norm:.4f} | dt: {dt*1000:.2f}ms | tok/sec: {tokens_per_sec:.2f}")
+        with open(log_file, "a") as f:
+            f.write(f"{step} train {loss_accum.item():.6f}\n")
+
+if ddp:
+    destroy_process_group()
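The training loop above saves whole-model `.pt` checkpoints via `torch.save`, while this commit uploads `model.safetensors` and `config.json`. A sketch of how such artifacts could be exported after training (an assumption, not part of the script above; `save_model` is used because the tied `wte`/`lm_head` weights share memory, which plain `save_file` rejects):

```python
# Hypothetical export step, run after training; assumes `raw_model` from the
# script above. save_model deduplicates the shared wte/lm_head tensor.
import json
from safetensors.torch import save_model

save_model(raw_model, "model.safetensors")
with open("config.json", "w") as f:
    json.dump(raw_model.config.__dict__, f, indent=2, sort_keys=True)
```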
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff