jiangbop committed on
Commit bf700b5 · verified · 1 Parent(s): c08ad7d

Update modeling_skywork_chat.py

Files changed (1)
  1. modeling_skywork_chat.py +8 -34
modeling_skywork_chat.py CHANGED
@@ -212,10 +212,7 @@ class SkyworkChatModel(PreTrainedModel):
 
         img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
         self.img_context_token_id = img_context_token_id
-        # print("##############1################")
-        # print(self.img_context_token_id)
-        # print("##############1################")
-        # exit()
+
 
         if verbose and pixel_values is not None:
             image_bs = pixel_values.shape[0]
@@ -265,18 +262,13 @@ class SkyworkChatModel(PreTrainedModel):
 
         img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
         self.img_context_token_id = img_context_token_id
-        # print("##############2################")
-        # print(self.img_context_token_id)
-        # print("##############2################")
+
 
         template = get_conv_template(self.template)
         template.system_message = self.system_message
         eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
-        # print("##############2.5################")
-        # print(template.sep.strip())
-        # print(eos_token_id)
-        # print("##############2.5################")
-
+
+
         history = [] if history is None else history
         for (old_question, old_answer) in history:
             template.append_message(template.roles[0], old_question)
@@ -284,13 +276,6 @@ class SkyworkChatModel(PreTrainedModel):
         template.append_message(template.roles[0], question)
         template.append_message(template.roles[1], None)
         query = template.get_prompt()
-        # print("##############3################")
-        # print(query)
-        # print("##############3################")
-        # query = """<|begin▁of▁sentence|>user
-        # <image>
-        # 图片内容是什么?<|end▁of▁sentence|>
-        # <|begin▁of▁sentence|>assistant"""
 
         if verbose and pixel_values is not None:
             image_bs = pixel_values.shape[0]
@@ -299,9 +284,7 @@ class SkyworkChatModel(PreTrainedModel):
         for num_patches in num_patches_list:
             image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
             query = query.replace('<image>', image_tokens, 1)
-        # print("##############4################")
-        # # print(query)
-        # print("##############4################")
+
 
         model_inputs = tokenizer(query, return_tensors='pt')
         input_ids = model_inputs['input_ids'].to(self.device)
@@ -316,7 +299,7 @@ class SkyworkChatModel(PreTrainedModel):
         response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
         response = response.split(template.sep.strip())[0].strip()
         history.append((question, response))
-        # print("###" + str(response))
+
         if return_history:
             return response, history
         else:
@@ -350,12 +333,7 @@ class SkyworkChatModel(PreTrainedModel):
 
             input_ids = input_ids.reshape(B * N)
             selected = (input_ids == self.img_context_token_id)
-            # print("#######################5####################")
-            # print(self.img_context_token_id)
-            # print(selected)
-            # print(selected.sum())
-            # print("#######################5####################")
-            # exit()
+
             assert selected.sum() != 0
             input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)
 
@@ -363,11 +341,7 @@ class SkyworkChatModel(PreTrainedModel):
         else:
            input_embeds = self.language_model.get_input_embeddings()(input_ids)
 
-        # print("#######################6####################")
-        # print(attention_mask)
-        # print(attention_mask.sum())
-        # print(output_hidden_states)
-        # print("#######################6####################")
+
 
         outputs = self.language_model.generate(
             inputs_embeds=input_embeds,
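For context, a minimal sketch (not the repository's code) of the two image-handling steps visible in the kept lines above: each <image> placeholder is expanded into IMG_START_TOKEN plus repeated IMG_CONTEXT_TOKENs plus IMG_END_TOKEN, and the embeddings at those IMG_CONTEXT positions are then overwritten with the vision encoder's output before language_model.generate runs. The token strings, tensor shapes, and helper names below are illustrative assumptions, not definitions from modeling_skywork_chat.py.

import torch

# Assumed token strings; the real values come from the model's tokenizer config.
IMG_START_TOKEN = '<img>'
IMG_CONTEXT_TOKEN = '<IMG_CONTEXT>'
IMG_END_TOKEN = '</img>'

def expand_image_placeholders(query, num_patches_list, num_image_token):
    # Each <image> placeholder becomes start + (num_image_token * num_patches)
    # context tokens + end, mirroring the loop kept in the diff above.
    for num_patches in num_patches_list:
        image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * num_image_token * num_patches + IMG_END_TOKEN
        query = query.replace('<image>', image_tokens, 1)
    return query

def inject_vit_embeds(input_ids, input_embeds, vit_embeds, img_context_token_id):
    # Overwrite the text embeddings at IMG_CONTEXT positions with the
    # vision-encoder features, as in the generate path shown above.
    B, N, C = input_embeds.shape
    flat_ids = input_ids.reshape(B * N)
    flat_embeds = input_embeds.reshape(B * N, C)
    selected = flat_ids == img_context_token_id
    assert selected.sum() != 0  # the prompt must contain IMG_CONTEXT tokens
    flat_embeds[selected] = vit_embeds.reshape(-1, C).to(flat_embeds.device)
    return flat_embeds.reshape(B, N, C)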