amokrov committed
Commit 2391db8 · verified · 1 Parent(s): 243f9df

Update README.md

Files changed (1)
  1. README.md +13 -75

README.md CHANGED
@@ -46,91 +46,29 @@ pip install git+https://github.com/huggingface/optimum-intel.git
  from PIL import Image
  import requests
  from optimum.intel.openvino import OVModelForVisualCausalLM
- from transformers import AutoTokenizer, TextStreamer
+ from transformers import AutoProcessor, TextStreamer
 
- model_id = "OpenVINO/pixtral-12b-int8-ov"
-
- tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
-
- ov_model = OVModelForVisualCausalLM.from_pretrained(model_id, trust_remote_code=True)
- prompt = "What is unusual on this picture?"
-
- url = "https://github.com/openvinotoolkit/openvino_notebooks/assets/29454499/d5fbbd1a-d484-415c-88cb-9986625b7b11"
- image = Image.open(requests.get(url, stream=True).raw)
-
- inputs = ov_model.preprocess_inputs(text=prompt, image=image, tokenizer=tokenizer, config=ov_model.config)
-
- generation_args = {
-     "max_new_tokens": 100,
-     "streamer": TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
- }
-
- generate_ids = ov_model.generate(**inputs, **generation_args)
-
- generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
- response = tokenizer.batch_decode(generate_ids, skip_special_tokens=True)[0]
-
- ```
-
- ## Running Model Inference with [OpenVINO GenAI](https://github.com/openvinotoolkit/openvino.genai)
-
- 1. Install packages required for using OpenVINO GenAI.
- ```
- pip install --pre -U --extra-index-url https://storage.openvinotoolkit.org/simple/wheels/pre-release openvino openvino-tokenizers openvino-genai
-
- pip install huggingface_hub
- ```
-
- 2. Download model from HuggingFace Hub
-
- ```
- import huggingface_hub as hf_hub
 
  model_id = "OpenVINO/pixtral-12b-int8-ov"
- model_path = "pixtral-12b-int8-ov"
-
- hf_hub.snapshot_download(model_id, local_dir=model_path)
 
- ```
-
- 1. Run model inference:
+ processor = AutoProcessor.from_pretrained(model_id)
+ ov_model = OVModelForVisualCausalLM.from_pretrained(model_id, trust_remote_code=True)
 
- ```
- import openvino_genai as ov_genai
- import requests
- from PIL import Image
- from io import BytesIO
- import numpy as np
- import openvino as ov
-
- device = "CPU"
- pipe = ov_genai.VLMPipeline(model_path, device)
-
- def load_image(image_file):
-     if isinstance(image_file, str) and (image_file.startswith("http") or image_file.startswith("https")):
-         response = requests.get(image_file)
-         image = Image.open(BytesIO(response.content)).convert("RGB")
-     else:
-         image = Image.open(image_file).convert("RGB")
-     image_data = np.array(image.getdata()).reshape(1, image.size[1], image.size[0], 3).astype(np.byte)
-     return ov.Tensor(image_data)
-
- prompt = "What is unusual on this picture?"
+ question = "What is unusual in this picture?"
+ messages = [
+     {"role": "user", "content": [{"type": "text", "content": question}, {"type": "image"}]},
+ ]
+ text = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
 
  url = "https://github.com/openvinotoolkit/openvino_notebooks/assets/29454499/d5fbbd1a-d484-415c-88cb-9986625b7b11"
- image_tensor = load_image(url)
+ raw_image = Image.open(requests.get(url, stream=True).raw)
 
- def streamer(subword: str) -> bool:
-     print(subword, end="", flush=True)
-     return False
+ inputs = processor(text=text, images=[raw_image], return_tensors="pt")
+ streamer = TextStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
 
- pipe.start_chat()
- output = pipe.generate(prompt, image=image_tensor, max_new_tokens=100, streamer=streamer)
- pipe.finish_chat()
- ```
-
- More GenAI usage examples can be found in OpenVINO GenAI library [docs](https://github.com/openvinotoolkit/openvino.genai/blob/master/src/README.md) and [samples](https://github.com/openvinotoolkit/openvino.genai?tab=readme-ov-file#openvino-genai-samples)
+ output = ov_model.generate(**inputs, do_sample=False, max_new_tokens=100, temperature=None, top_p=None, streamer=streamer)
 
+ ```
 
  ## Limitations
 
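The updated snippet relies on `TextStreamer` to print the answer as it is generated and does not decode `output` into a string. If the full reply is also needed as a Python variable, the ids returned by `generate` can be decoded with the processor's tokenizer, mirroring what the removed snippet did with `tokenizer.batch_decode`. A minimal sketch, assuming the updated example above has already run and that `output` contains the prompt tokens followed by the newly generated ones:

```
# Sketch only: recover the generated text as a string after running the
# updated README example (assumes `inputs`, `processor`, and `output`
# from that snippet are in scope).
generated_ids = output[:, inputs["input_ids"].shape[1]:]  # drop the prompt tokens
response = processor.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
```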