Update starvector_arch.py
starvector_arch.py  +31 -60  CHANGED
```diff
@@ -35,7 +35,6 @@ class SimpleStarVectorProcessor(ProcessorMixin):
         self.mean = mean
         self.std = std
         self.size = size
-
         self.normalize = transforms.Normalize(mean=mean, std=std)
 
         self.transform = transforms.Compose([
```
```diff
@@ -50,7 +49,7 @@ class SimpleStarVectorProcessor(ProcessorMixin):
         super().__init__(tokenizer=tokenizer)
 
 
-    def __call__(self, images=None, text=None, **kwargs) -> BatchFeature:
+    def __call__(self, images=None, text=None, max_length=None, **kwargs) -> BatchFeature:
         """
         Process images and/or text inputs.
 
```
```diff
@@ -65,16 +64,32 @@ class SimpleStarVectorProcessor(ProcessorMixin):
         image_inputs = {}
         if images is not None:
             if isinstance(images, (list, tuple)):
-                images_ = [self.transform(img) for img in images]
+                images_ = torch.stack([self.transform(img) for img in images])
             else:
                 images_ = self.transform(images)
             image_inputs = {"pixel_values": images_}
 
         text_inputs = {}
         if text is not None:
-            text_inputs = self.tokenizer(
+            text_inputs = self.tokenizer(
+                text, truncation=True,
+                add_special_tokens=True,
+                padding='longest',
+                max_length=max_length,
+                return_tensors="pt"
+            )
+
         return BatchFeature(data={**text_inputs, **image_inputs})
 
+    def _pad_to_square(self, img):
+        # Calculate padding to make the image square
+        width, height = img.size
+        max_dim = max(width, height)
+        padding = [(max_dim - width) // 2, (max_dim - height) // 2]
+        padding += [max_dim - width - padding[0], max_dim - height - padding[1]]
+        return pad(img, padding, fill=255)  # Assuming white padding
+
+
 AutoProcessor.register(SimpleStarVectorProcessor, SimpleStarVectorProcessor)
 
 
```
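The reworked `__call__` stacks list inputs into one batched `pixel_values` tensor instead of returning a Python list, and tokenizes text with `padding='longest'` and `truncation=True`, clipping at `max_length` when one is given. The new `_pad_to_square` helper letterboxes images with white; its `pad` call presumably refers to `torchvision.transforms.functional.pad`. A minimal usage sketch, assuming the processor's remaining preprocessing arguments keep their defaults and using a stand-in tokenizer:

```python
from PIL import Image
from transformers import AutoTokenizer

# Stand-in tokenizer for illustration; a real StarVector checkpoint
# would ship its own via AutoProcessor.from_pretrained(...).
tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoderbase-1b")
processor = SimpleStarVectorProcessor(tokenizer=tokenizer)

images = [Image.new("RGB", (100, 60), "white"),
          Image.new("RGB", (64, 64), "white")]
batch = processor(images=images, text=["<svg>", "<svg>"], max_length=512)

print(batch["pixel_values"].shape)  # (2, C, H, W): one stacked tensor
print(batch["input_ids"].shape)     # padded to the longest text in the batch
```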
```diff
@@ -128,6 +143,7 @@ class StarVectorForCausalLM(PreTrainedModel):
         else:
             from starvector.model.models.starvector_v1 import StarVectorStarCoder
             self.model = StarVectorStarCoder(config=config, **kwargs)
+
 
     @property
     def supports_gradient_checkpointing(self):
```
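Because `StarVectorForCausalLM` is a custom architecture defined in this file, loading a checkpoint from the Hub requires remote code execution. A sketch of the usual loading path; the repo id below is illustrative, not taken from this commit:

```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "starvector/starvector-1b-im2svg",  # illustrative checkpoint id
    trust_remote_code=True,  # executes starvector_arch.py from the repo
)
```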
```diff
@@ -142,70 +158,28 @@ class StarVectorForCausalLM(PreTrainedModel):
         if hasattr(self.model, 'svg_transformer') and hasattr(self.model.svg_transformer, 'gradient_checkpointing_enable'):
             self.model.svg_transformer.gradient_checkpointing_enable()
 
-    def forward(
-        self,
-        input_ids: Optional[torch.Tensor] = None,
-        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
-        attention_mask: Optional[torch.Tensor] = None,
-        token_type_ids: Optional[torch.Tensor] = None,
-        position_ids: Optional[torch.Tensor] = None,
-        head_mask: Optional[torch.Tensor] = None,
-        inputs_embeds: Optional[torch.Tensor] = None,
-        encoder_hidden_states: Optional[torch.Tensor] = None,
-        encoder_attention_mask: Optional[torch.Tensor] = None,
-        labels: Optional[torch.Tensor] = None,
-        use_cache: Optional[bool] = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
-        return_dict: Optional[bool] = None,
-        num_logits_to_keep: int = 0,
-    ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
+    def forward(self, inputs_embeds, input_ids, num_generations, num_logits_to_keep) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
         r"""
-
-        Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
-        `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
-        are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
+        Wrapper for the forward pass of the model.
         """
-
+        device = inputs_embeds.device
 
-
-
-
+        completion_embeds = self.model._get_embeddings(input_ids)
+        inputs_embeds = torch.cat([inputs_embeds.repeat(num_generations, 1, 1), completion_embeds], dim=1)
+        attention_mask = torch.ones_like(inputs_embeds[:, :, 0]).to(device)
+
+        transformer_outputs = self.model.svg_transformer.transformer.transformer(
+            inputs_embeds=inputs_embeds,
             attention_mask=attention_mask,
-            token_type_ids=token_type_ids,
-            position_ids=position_ids,
-            head_mask=head_mask,
-            inputs_embeds=inputs_embeds,
-            encoder_hidden_states=encoder_hidden_states,
-            encoder_attention_mask=encoder_attention_mask,
-            use_cache=use_cache,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-            return_dict=return_dict,
         )
         hidden_states = transformer_outputs[0]
 
         # If GRPO requested only the last tokens, slice accordingly.
         if num_logits_to_keep > 0:
-            lm_logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
+            lm_logits = self.model.svg_transformer.transformer.lm_head(hidden_states[:, -num_logits_to_keep:, :])
         else:
-            lm_logits = self.lm_head(hidden_states)
-
-        # lm_logits = self.lm_head(hidden_states)
-
+            lm_logits = self.model.svg_transformer.transformer.lm_head(hidden_states)
         loss = None
-        if labels is not None:
-            # Shift so that tokens < n predict n
-            shift_logits = lm_logits[..., :-1, :].contiguous()
-            shift_labels = labels[..., 1:].contiguous().to(shift_logits.device)
-            # Flatten the tokens
-            loss_fct = CrossEntropyLoss()
-            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
-
-        if not return_dict:
-            output = (lm_logits,) + transformer_outputs[1:]
-            return ((loss,) + output) if loss is not None else output
-
         return CausalLMOutputWithCrossAttentions(
             loss=loss,
             logits=lm_logits,
```
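The rewritten `forward` is specialized for GRPO rollouts: one prompt's embeddings are tiled `num_generations` times, concatenated with the embeddings of the sampled completions, and covered by an all-ones attention mask (valid only while the completions carry no padding). A shape-only sketch of that tiling with dummy tensors; every dimension below is invented for illustration:

```python
import torch

prompt_len, hidden = 10, 768
num_generations, completion_len = 4, 20

prompt_embeds = torch.randn(1, prompt_len, hidden)  # a single prompt
completion_embeds = torch.randn(num_generations, completion_len, hidden)

# Tile the single prompt across the group, then append each completion.
inputs_embeds = torch.cat(
    [prompt_embeds.repeat(num_generations, 1, 1), completion_embeds], dim=1
)
assert inputs_embeds.shape == (num_generations, prompt_len + completion_len, hidden)

# Every position is a real token here, so the mask is all ones.
attention_mask = torch.ones_like(inputs_embeds[:, :, 0])
assert attention_mask.shape == (num_generations, prompt_len + completion_len)
```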
```diff
@@ -214,9 +188,6 @@ class StarVectorForCausalLM(PreTrainedModel):
             attentions=transformer_outputs.attentions,
             cross_attentions=transformer_outputs.cross_attentions,
         )
-
-    # def forward(self, batch):
-    #     return self.model(batch)
 
     def generate_im2svg(self, batch, **kwargs):
         return self.model.generate_im2svg(batch, **kwargs)
```
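A typical GRPO consumer of this `forward` keeps only the completion logits via `num_logits_to_keep` and converts them to per-token log-probabilities. A sketch with stand-in tensors (the `+ 1` requests one extra logit to absorb the usual one-position shift between logits and the tokens they score):

```python
import torch
import torch.nn.functional as F

num_generations, completion_len, vocab = 4, 20, 50257

# Stand-ins for: out = model(inputs_embeds, completion_ids, num_generations,
#                            num_logits_to_keep=completion_len + 1)
logits = torch.randn(num_generations, completion_len + 1, vocab)
completion_ids = torch.randint(0, vocab, (num_generations, completion_len))

# The logit at position t scores token t+1, so drop the final logit and
# align the rest with the completion tokens before gathering.
logits = logits[:, :-1, :]
token_log_probs = F.log_softmax(logits, dim=-1).gather(
    -1, completion_ids.unsqueeze(-1)
).squeeze(-1)  # (num_generations, completion_len)
```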