Files changed (1) hide show
  1. microsoft_OmniParser-v2.0.json +55 -0
microsoft_OmniParser-v2.0.json ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bomFormat": "CycloneDX",
3
+ "specVersion": "1.6",
4
+ "serialNumber": "urn:uuid:9ea99512-bee0-49b7-b970-2a0390fbd49a",
5
+ "version": 1,
6
+ "metadata": {
7
+ "timestamp": "2025-06-05T09:36:46.991748+00:00",
8
+ "component": {
9
+ "type": "machine-learning-model",
10
+ "bom-ref": "microsoft/OmniParser-v2.0-b6ddf12a-9a42-527c-a82b-9803ccf53013",
11
+ "name": "microsoft/OmniParser-v2.0",
12
+ "externalReferences": [
13
+ {
14
+ "url": "https://huggingface.co/microsoft/OmniParser-v2.0",
15
+ "type": "documentation"
16
+ }
17
+ ],
18
+ "modelCard": {
19
+ "properties": [
20
+ {
21
+ "name": "library_name",
22
+ "value": "transformers"
23
+ }
24
+ ],
25
+ "considerations": {
26
+ "useCases": "- OmniParser is designed to be able to convert unstructured screenshot image into structured list of elements including interactable regions location and captions of icons on its potential functionality. - OmniParser is intended to be used in settings where users are already trained on responsible analytic approaches and critical reasoning is expected. OmniParser is capable of providing extracted information from the screenshot, however human judgement is needed for the output of OmniParser. - OmniParser is intended to be used on various screenshots, which includes both PC and Phone, and also on various applications."
27
+ }
28
+ },
29
+ "authors": [
30
+ {
31
+ "name": "microsoft"
32
+ }
33
+ ],
34
+ "licenses": [
35
+ {
36
+ "license": {
37
+ "id": "MIT",
38
+ "url": "https://spdx.org/licenses/MIT.html"
39
+ }
40
+ }
41
+ ],
42
+ "description": "OmniParser is a general screen parsing tool, which interprets/converts UI screenshot to structured format, to improve existing LLM based UI agent. Training Datasets include: 1) an interactable icon detection dataset, which was curated from popular web pages and automatically annotated to highlight clickable and actionable regions, and 2) an icon description dataset, designed to associate each UI element with its corresponding function. This model hub includes a finetuned version of YOLOv8 and a finetuned Florence-2 base model on the above dataset respectively. For more details of the models used and finetuning, please refer to the [paper](https://arxiv.org/abs/2408.00203).",
43
+ "tags": [
44
+ "transformers",
45
+ "safetensors",
46
+ "endpoint-template",
47
+ "custom_code",
48
+ "arxiv:2408.00203",
49
+ "license:mit",
50
+ "endpoints_compatible",
51
+ "region:us"
52
+ ]
53
+ }
54
+ }
55
+ }