
How to use


import os

# Select which GPUs to expose before torch initializes CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

import torch
from transformers import AutoProcessor
from modelscope import Qwen2VLForConditionalGeneration
from qwen_vl_utils import process_vision_info

# Load the fine-tuned checkpoint. Uncomment attn_implementation below to enable
# flash_attention_2 for better acceleration and memory savings (requires the flash-attn package).
model_dir = "Qwen2-VL-7B-Instruct-sft"
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_dir,
    torch_dtype=torch.bfloat16,
    # attn_implementation="flash_attention_2",
    device_map="auto",
)
model.eval()
processor = AutoProcessor.from_pretrained(model_dir)


def build_messages(images, text):
    """Wrap a list of images (URLs or local paths) and a text prompt into the chat message format."""
    content = [{"type": "image", "image": img} for img in images]
    content.append({"type": "text", "text": text})
    return [
        {
            "role": "user",
            "content": content,
        }
    ]



imgs = ["https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"]
prompt = "Describe this image."
messages = build_messages(imgs, prompt)
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True, add_vision_id=True
)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to(model.device)

with torch.no_grad():
    generated_ids = model.generate(**inputs, max_new_tokens=1024)
generated_ids_trimmed = [
    out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)[0].strip()
print(output_text)
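
The same pipeline also handles video input. The snippet below is a minimal sketch rather than part of this card: the local video path is a placeholder, and the "video" content type with a file:// URI follows the upstream qwen_vl_utils conventions.

# Minimal video-input sketch (assumed qwen_vl_utils usage; the path below is a placeholder).
video_messages = [
    {
        "role": "user",
        "content": [
            {"type": "video", "video": "file:///path/to/video.mp4"},
            {"type": "text", "text": "Describe this video."},
        ],
    }
]
text = processor.apply_chat_template(
    video_messages, tokenize=False, add_generation_prompt=True
)
image_inputs, video_inputs = process_vision_info(video_messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
).to(model.device)

with torch.no_grad():
    generated_ids = model.generate(**inputs, max_new_tokens=1024)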

Citation

@misc{wang2025cigeval,
      title={A Unified Agentic Framework for Evaluating Conditional Image Generation}, 
      author={Jifang Wang and Xue Yang and Longyue Wang and Zhenran Xu and Yiyu Wang and Yaowei Wang and Weihua Luo and Kaifu Zhang and Baotian Hu and Min Zhang},
      year={2025},
      eprint={2504.07046},
      archivePrefix={arXiv},
      primaryClass={cs.CV},
      url={https://arxiv.org/abs/2504.07046}, 
}