update app (#7)
(commit 4ee824d340708d867e2c390b4ecc820d56395d62)

app.py CHANGED
@@ -65,6 +65,15 @@ model_f = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     torch_dtype=torch.float16
 ).to(device).eval()

+# Load R-4B
+MODEL_ID_Y = "YannQi/R-4B"
+processor_y = AutoProcessor.from_pretrained(MODEL_ID_Y, trust_remote_code=True)
+model_y = AutoModel.from_pretrained(
+    MODEL_ID_Y,
+    trust_remote_code=True,
+    torch_dtype=torch.float16
+).to(device).eval()
+
 def downsample_video(video_path):
     """
     Downsamples the video to evenly spaced frames.
@@ -109,6 +118,9 @@ def generate_image(model_name: str, text: str, image: Image.Image,
     elif model_name == "olmOCR-7B-0825":
         processor = processor_f
         model = model_f
+    elif model_name == "R-4B":
+        processor = processor_y
+        model = model_y
     else:
         yield "Invalid model selected.", "Invalid model selected."
         return
@@ -166,6 +178,9 @@ def generate_video(model_name: str, text: str, video_path: str,
     elif model_name == "olmOCR-7B-0825":
         processor = processor_f
         model = model_f
+    elif model_name == "R-4B":
+        processor = processor_y
+        model = model_y
     else:
         yield "Invalid model selected.", "Invalid model selected."
         return
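The hunks above only change how a processor/model pair is dispatched; the inference code that consumes the pair lies outside the diff. For orientation, here is a minimal sketch of the usual Qwen2.5-VL-style single-image flow, under the assumption that R-4B's remote code exposes the same chat-template and generate interface as the other processors in this Space (the function and variable names below are illustrative, not the Space's own):

import torch
from PIL import Image

def run_single_query(processor, model, text: str, image: Image.Image, device: str = "cuda") -> str:
    # Build a one-turn chat with an image placeholder plus the user's prompt.
    messages = [{
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": text},
        ],
    }]
    # Render the chat template, then tokenize text and image together.
    prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = processor(text=[prompt], images=[image], return_tensors="pt").to(device)
    with torch.inference_mode():
        generated = model.generate(**inputs, max_new_tokens=512)
    # Drop the prompt tokens before decoding the reply.
    reply_ids = generated[:, inputs["input_ids"].shape[1]:]
    return processor.batch_decode(reply_ids, skip_special_tokens=True)[0]

Calling run_single_query(processor_y, model_y, "Describe the image.", img) would be one way to exercise the new branch end to end, provided R-4B accepts this interface.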
@@ -213,7 +228,10 @@ def generate_video(model_name: str, text: str, video_path: str,

 # Define examples for image and video inference
 image_examples = [
-    ["
+    ["Explain the content in detail.", "images/D.jpg"],
+    ["Explain the content (ocr).", "images/O.jpg"],
+    ["What is the core meaning of the poem?", "images/S.jpg"],
+    ["Provide a detailed caption for the image.", "images/A.jpg"],
     ["Explain the pie-chart in detail.", "images/2.jpg"],
     ["Jsonify Data.", "images/1.jpg"],
 ]
@@ -241,7 +259,7 @@ css = """

 # Create the Gradio Interface
 with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
-    gr.Markdown("# **[Qwen2.5-VL](https://huggingface.co/collections/prithivMLmods/multimodal-implementations-67c9982ea04b39f0608badb0)**")
+    gr.Markdown("# **[Qwen2.5-VL-Outpost](https://huggingface.co/collections/prithivMLmods/multimodal-implementations-67c9982ea04b39f0608badb0)**")
     with gr.Row():
         with gr.Column():
             with gr.Tabs():
@@ -271,21 +289,36 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
         with gr.Column():
             with gr.Column(elem_classes="canvas-output"):
                 gr.Markdown("## Output")
-                output = gr.Textbox(label="Raw Output", interactive=False, lines=
+                output = gr.Textbox(label="Raw Output", interactive=False, lines=3, scale=2)

                 with gr.Accordion("(Result.md)", open=False):
                     markdown_output = gr.Markdown()

             model_choice = gr.Radio(
-                choices=["Qwen2.5-VL-7B-Instruct", "Qwen2.5-VL-3B-Instruct", "Qwen2.5-VL-7B-Abliterated-Caption-it", "olmOCR-7B-0825"],
+                choices=["Qwen2.5-VL-7B-Instruct", "Qwen2.5-VL-3B-Instruct", "R-4B", "Qwen2.5-VL-7B-Abliterated-Caption-it", "olmOCR-7B-0825"],
                 label="Select Model",
                 value="Qwen2.5-VL-7B-Instruct"
             )
             gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Qwen2.5-VL/discussions)")
-
-            gr.Markdown(
-
-
+
+            gr.Markdown(
+                """
+                > [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct): The Qwen2.5-VL-7B-Instruct model is a multimodal AI model developed by Alibaba Cloud that excels at understanding both text and images. It is a Vision-Language Model (VLM) designed to handle various visual understanding tasks, including image understanding and video analysis, along with multilingual support.
+                >
+                > [Qwen2.5-VL-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-3B-Instruct): Qwen2.5-VL-3B-Instruct is an instruction-tuned vision-language model from Alibaba Cloud, built upon the Qwen2-VL series. It excels at understanding and generating text related to both visual and textual inputs, making it capable of tasks like image captioning, visual question answering, and object localization. The model also supports long video understanding and structured data extraction.
+                """
+            )
+
+            gr.Markdown(
+                """
+                > [Qwen2.5-VL-7B-Abliterated-Caption-it](https://huggingface.co/prithivMLmods/Qwen2.5-VL-7B-Abliterated-Caption-it): Qwen2.5-VL-7B-Abliterated-Caption-it is a fine-tuned version of Qwen2.5-VL-7B-Instruct, optimized for abliterated (uncensored) captioning. This model excels at generating detailed, context-rich, and high-fidelity captions across diverse image categories and varied aspect ratios, offering robust visual understanding without filtering or censorship.
+                >
+                > [olmOCR-7B-0825](https://huggingface.co/allenai/olmOCR-7B-0825): olmOCR-7B-0825 is a 7B-parameter open model designed for OCR tasks with robust text extraction, especially in complex document layouts. It is a multimodal model that combines strong document reading and extraction capabilities with vision-language understanding to support detailed document-parsing tasks.
+                """
+            )
+
+            gr.Markdown("> [R-4B](https://huggingface.co/YannQi/R-4B): R-4B is a multimodal large language model designed for adaptive auto-thinking, able to intelligently switch between detailed reasoning and direct responses to optimize quality and efficiency. It achieves state-of-the-art performance and efficiency with user-controllable response modes, making it ideal for both simple and complex tasks.")
+
             gr.Markdown(">⚠️note: all the models in space are not guaranteed to perform well in video inference use cases.")

     image_submit.click(
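Because the generate_* functions already receive the Radio value as their first argument, adding "R-4B" to choices is the only UI change the PR needs; the existing click wiring routes it to the new elif branch. A self-contained toy (not the Space's code) illustrating that dispatch pattern:

import gradio as gr

def describe(model_name: str, text: str) -> str:
    # Mirrors the Space's dispatch: the Radio value selects the branch.
    if model_name == "R-4B":
        return f"[R-4B would answer] {text}"
    return f"[{model_name} would answer] {text}"

with gr.Blocks() as toy:
    model_choice = gr.Radio(
        choices=["Qwen2.5-VL-7B-Instruct", "R-4B"],
        value="Qwen2.5-VL-7B-Instruct",
        label="Select Model",
    )
    query = gr.Textbox(label="Query")
    output = gr.Textbox(label="Raw Output", interactive=False)
    run = gr.Button("Run")
    # The Radio component is passed as the handler's first input.
    run.click(fn=describe, inputs=[model_choice, query], outputs=output)

if __name__ == "__main__":
    toy.launch()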