JH-C-k committed
Commit 8d68508 · verified · 1 Parent(s): 6ace3e1

Add files using upload-large-folder tool
.gitattributes CHANGED
@@ -32,3 +32,4 @@ clip/google/siglip-base-patch16-256/model.safetensors filter=lfs diff=lfs merge=lfs -text
  clip/laion/CLIP-ViT-B-32-laion2B-s34B-b79K/open_clip_model.safetensors filter=lfs diff=lfs merge=lfs -text
  clip/laion/CLIP-ViT-B-32-laion2B-s34B-b79K/model.safetensors filter=lfs diff=lfs merge=lfs -text
  clip/facebook/metaclip-g14-fullcc2.5b/model-00003-of-00003.safetensors filter=lfs diff=lfs merge=lfs -text
+ clip/laion/CLIP-ViT-L-14-laion2B-s32B-b82K/open_clip_pytorch_model.safetensors filter=lfs diff=lfs merge=lfs -text
clip/apple/DFN5B-CLIP-ViT-H-14/.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
clip/apple/DFN5B-CLIP-ViT-H-14/LICENSE ADDED
@@ -0,0 +1,88 @@
+ Disclaimer: IMPORTANT: This Apple Machine Learning Research Model is
+ specifically developed and released by Apple Inc. ("Apple") for the sole purpose
+ of scientific research of artificial intelligence and machine-learning
+ technology. “Apple Machine Learning Research Model” means the model, including
+ but not limited to algorithms, formulas, trained model weights, parameters,
+ configurations, checkpoints, and any related materials (including
+ documentation).
+
+ This Apple Machine Learning Research Model is provided to You by
+ Apple in consideration of your agreement to the following terms, and your use,
+ modification, creation of Model Derivatives, and/or redistribution of the Apple
+ Machine Learning Research Model constitutes acceptance of this Agreement. If You
+ do not agree with these terms, please do not use, modify, create Model
+ Derivatives of, or distribute this Apple Machine Learning Research Model or
+ Model Derivatives.
+
+ * License Scope: In consideration of your agreement to abide by the following
+ terms, and subject to these terms, Apple hereby grants you a personal,
+ non-exclusive, worldwide, non-transferable, royalty-free, revocable, and
+ limited license, to use, copy, modify, distribute, and create Model
+ Derivatives (defined below) of the Apple Machine Learning Research Model
+ exclusively for Research Purposes. You agree that any Model Derivatives You
+ may create or that may be created for You will be limited to Research Purposes
+ as well. “Research Purposes” means non-commercial scientific research and
+ academic development activities, such as experimentation, analysis, testing
+ conducted by You with the sole intent to advance scientific knowledge and
+ research. “Research Purposes” does not include any commercial exploitation,
+ product development or use in any commercial product or service.
+
+ * Distribution of Apple Machine Learning Research Model and Model Derivatives:
+ If you choose to redistribute Apple Machine Learning Research Model or its
+ Model Derivatives, you must provide a copy of this Agreement to such third
+ party, and ensure that the following attribution notice be provided: “Apple
+ Machine Learning Research Model is licensed under the Apple Machine Learning
+ Research Model License Agreement.” Additionally, all Model Derivatives must
+ clearly be identified as such, including disclosure of modifications and
+ changes made to the Apple Machine Learning Research Model. The name,
+ trademarks, service marks or logos of Apple may not be used to endorse or
+ promote Model Derivatives or the relationship between You and Apple. “Model
+ Derivatives” means any models or any other artifacts created by modifications,
+ improvements, adaptations, alterations to the architecture, algorithm or
+ training processes of the Apple Machine Learning Research Model, or by any
+ retraining, fine-tuning of the Apple Machine Learning Research Model.
+
+ * No Other License: Except as expressly stated in this notice, no other rights
+ or licenses, express or implied, are granted by Apple herein, including but
+ not limited to any patent, trademark, and similar intellectual property rights
+ worldwide that may be infringed by the Apple Machine Learning Research Model,
+ the Model Derivatives or by other works in which the Apple Machine Learning
+ Research Model may be incorporated.
+
+ * Compliance with Laws: Your use of Apple Machine Learning Research Model must
+ be in compliance with all applicable laws and regulations.
+
+ * Term and Termination: The term of this Agreement will begin upon your
+ acceptance of this Agreement or use of the Apple Machine Learning Research
+ Model and will continue until terminated in accordance with the following
+ terms. Apple may terminate this Agreement at any time if You are in breach of
+ any term or condition of this Agreement. Upon termination of this Agreement,
+ You must cease to use all Apple Machine Learning Research Models and Model
+ Derivatives and permanently delete any copy thereof. Sections 3, 6 and 7 will
+ survive termination.
+
+ * Disclaimer and Limitation of Liability: This Apple Machine Learning Research
+ Model and any outputs generated by the Apple Machine Learning Research Model
+ are provided on an “AS IS” basis. APPLE MAKES NO WARRANTIES, EXPRESS OR
+ IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
+ NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ REGARDING THE APPLE MACHINE LEARNING RESEARCH MODEL OR OUTPUTS GENERATED BY
+ THE APPLE MACHINE LEARNING RESEARCH MODEL. You are solely responsible for
+ determining the appropriateness of using or redistributing the Apple Machine
+ Learning Research Model and any outputs of the Apple Machine Learning Research
+ Model and assume any risks associated with Your use of the Apple Machine
+ Learning Research Model and any output and results. IN NO EVENT SHALL APPLE BE
+ LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+ IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF
+ THE APPLE MACHINE LEARNING RESEARCH MODEL AND ANY OUTPUTS OF THE APPLE MACHINE
+ LEARNING RESEARCH MODEL, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+ TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS
+ BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ * Governing Law: This Agreement will be governed by and construed under the laws
+ of the State of California without regard to its choice of law principles. The
+ Convention on Contracts for the International Sale of Goods shall not apply to
+ the Agreement except that the arbitration clause and any arbitration hereunder
+ shall be governed by the Federal Arbitration Act, Chapters 1 and 2.
+
+ Copyright (C) 2025 Apple Inc. All Rights Reserved.
clip/apple/DFN5B-CLIP-ViT-H-14/README.md ADDED
@@ -0,0 +1,107 @@
+ ---
+ license: apple-amlr
+ license_name: apple-sample-code-license
+ license_link: LICENSE
+ ---
+ A CLIP (Contrastive Language-Image Pre-training) model trained on DFN-5B.
+ Data Filtering Networks (DFNs) are small networks used to automatically filter large pools of uncurated data.
+ This model was trained on 5B images that were filtered from a pool of 43B uncurated image-text pairs
+ (12.8B image-text pairs from CommonPool-12.8B + 30B additional public image-text pairs).
+
+ This model has been converted to PyTorch from the original JAX checkpoints trained with Axlearn (https://github.com/apple/axlearn).
+ These weights are directly usable in OpenCLIP (image + text).
+
+
+ ## Model Details
+
+ - **Model Type:** Contrastive Image-Text, Zero-Shot Image Classification.
+ - **Dataset:** DFN-5B
+ - **Papers:**
+   - Data Filtering Networks: https://arxiv.org/abs/2309.17425
+ - **Samples Seen:** 39B
+ ## Model Metrics
+
+ | Eval Dataset | Metric |
+ |:-----------------------|---------:|
+ | ImageNet 1k | 0.8344 |
+ | Caltech-101 | 0.954935 |
+ | CIFAR-10 | 0.9878 |
+ | CIFAR-100 | 0.9051 |
+ | CLEVR Counts | 0.2966 |
+ | CLEVR Distance | 0.2124 |
+ | Country211 | 0.343981 |
+ | Describable Textures | 0.706383 |
+ | EuroSAT | 0.654815 |
+ | FGVC Aircraft | 0.714055 |
+ | Food-101 | 0.956792 |
+ | GTSRB | 0.677514 |
+ | ImageNet Sketch | 0.727308 |
+ | ImageNet v2 | 0.773 |
+ | ImageNet-A | 0.6988 |
+ | ImageNet-O | 0.381 |
+ | ImageNet-R | 0.929367 |
+ | KITTI Vehicle Distance | 0.336146 |
+ | MNIST | 0.8579 |
+ | ObjectNet | 0.765156 |
+ | Oxford Flowers-102 | 0.899534 |
+ | Oxford-IIIT Pet | 0.965515 |
+ | Pascal VOC 2007 | 0.818309 |
+ | PatchCamelyon | 0.653625 |
+ | Rendered SST2 | 0.546403 |
+ | RESISC45 | 0.750476 |
+ | Stanford Cars | 0.957592 |
+ | STL-10 | 0.989 |
+ | SUN397 | 0.769149 |
+ | SVHN | 0.676168 |
+ | Flickr | 0.8645 |
+ | MSCOCO | 0.631112 |
+ | WinoGAViL | 0.556329 |
+ | iWildCam | 0.205549 |
+ | Camelyon17 | 0.705034 |
+ | FMoW | 0.207482 |
+ | Dollar Street | 0.699766 |
+ | GeoDE | 0.928184 |
+ | **Average** | **0.698347** |
+ ## Model Usage
+ ### With OpenCLIP
+ ```python
+ import torch
+ import torch.nn.functional as F
+ from urllib.request import urlopen
+ from PIL import Image
+ from open_clip import create_model_from_pretrained, get_tokenizer
+
+ model, preprocess = create_model_from_pretrained('hf-hub:apple/DFN5B-CLIP-ViT-H-14')
+ tokenizer = get_tokenizer('ViT-H-14')
+
+ image = Image.open(urlopen(
+     'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
+ ))
+ image = preprocess(image).unsqueeze(0)
+
+ labels_list = ["a dog", "a cat", "a donut", "a beignet"]
+ text = tokenizer(labels_list, context_length=model.context_length)
+
+ with torch.no_grad(), torch.cuda.amp.autocast():
+     image_features = model.encode_image(image)
+     text_features = model.encode_text(text)
+     image_features = F.normalize(image_features, dim=-1)
+     text_features = F.normalize(text_features, dim=-1)
+
+     text_probs = torch.sigmoid(image_features @ text_features.T * model.logit_scale.exp() + model.logit_bias)
+
+ zipped_list = list(zip(labels_list, [round(p.item(), 3) for p in text_probs[0]]))
+ print("Label probabilities: ", zipped_list)
+ ```
+
+ ## Citation
+ ```bibtex
+ @article{fang2023data,
+   title={Data Filtering Networks},
+   author={Fang, Alex and Jose, Albin Madappally and Jain, Amit and Schmidt, Ludwig and Toshev, Alexander and Shankar, Vaishaal},
+   journal={arXiv preprint arXiv:2309.17425},
+   year={2023}
+ }
+
+ ```
+
clip/apple/DFN5B-CLIP-ViT-H-14/config.json ADDED
@@ -0,0 +1,165 @@
+ {
+   "_commit_hash": null,
+   "architectures": [
+     "CLIPModel"
+   ],
+   "initializer_factor": 1.0,
+   "logit_scale_init_value": 2.6592,
+   "model_type": "clip",
+   "projection_dim": 1024,
+   "text_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": 0,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 49407,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "quick_gelu",
+     "hidden_size": 1024,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_factor": 1.0,
+     "initializer_range": 0.02,
+     "intermediate_size": 4096,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-05,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 77,
+     "min_length": 0,
+     "model_type": "clip_text_model",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 16,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 24,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 49408,
+     "prefix": null,
+     "problem_type": null,
+     "projection_dim": 512,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.27.1",
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "vocab_size": 49409
+   },
+   "torch_dtype": "float32",
+   "transformers_version": null,
+   "vision_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "quick_gelu",
+     "hidden_size": 1280,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 224,
+     "initializer_factor": 1.0,
+     "initializer_range": 0.02,
+     "intermediate_size": 5120,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-05,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "clip_vision_model",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 16,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_hidden_layers": 32,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 14,
+     "prefix": null,
+     "problem_type": null,
+     "projection_dim": 512,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.27.1",
+     "typical_p": 1.0,
+     "use_bfloat16": false
+   }
+ }
clip/apple/DFN5B-CLIP-ViT-H-14/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
clip/apple/DFN5B-CLIP-ViT-H-14/open_clip_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "model_cfg": {
+     "embed_dim": 1024,
+     "quick_gelu": true,
+     "vision_cfg": {
+       "image_size": 224,
+       "layers": 32,
+       "width": 1280,
+       "head_width": 80,
+       "patch_size": 14
+     },
+     "text_cfg": {
+       "context_length": 77,
+       "vocab_size": 49408,
+       "width": 1024,
+       "heads": 16,
+       "layers": 24
+     }
+   },
+   "preprocess_cfg": {
+     "mean": [
+       0.48145466,
+       0.4578275,
+       0.40821073
+     ],
+     "std": [
+       0.26862954,
+       0.26130258,
+       0.27577711
+     ],
+     "interpolation": "bicubic",
+     "resize_mode": "squash"
+   }
+ }
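For readers unfamiliar with open_clip's `preprocess_cfg`, the image transform it describes is roughly the following. This is a minimal sketch assuming torchvision; open_clip builds the equivalent pipeline internally from this config, so you would not normally write this yourself.

```python
# Sketch of the preprocessing described by preprocess_cfg above
# (assumes torchvision; open_clip constructs this internally).
from torchvision import transforms

preprocess = transforms.Compose([
    # resize_mode "squash": resize both sides to image_size, ignoring aspect ratio
    transforms.Resize((224, 224), interpolation=transforms.InterpolationMode.BICUBIC),
    transforms.ToTensor(),  # HWC uint8 -> CHW float in [0, 1]
    transforms.Normalize(
        mean=(0.48145466, 0.4578275, 0.40821073),
        std=(0.26862954, 0.26130258, 0.27577711),
    ),
])
```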
clip/apple/DFN5B-CLIP-ViT-H-14/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|endoftext|>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
clip/apple/DFN5B-CLIP-ViT-H-14/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
clip/apple/DFN5B-CLIP-ViT-H-14/tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "do_lower_case": true,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "model_max_length": 77,
+   "name_or_path": "openai/clip-vit-large-patch14",
+   "pad_token": "<|endoftext|>",
+   "special_tokens_map_file": "./special_tokens_map.json",
+   "tokenizer_class": "CLIPTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
clip/apple/DFN5B-CLIP-ViT-H-14/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
clip/facebook/metaclip-l14-400m/.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
clip/facebook/metaclip-l14-400m/README.md ADDED
@@ -0,0 +1,46 @@
+ ---
+ license: cc-by-nc-4.0
+ tags:
+ - vision
+ - metaclip
+ widget:
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-dog-music.png
+   candidate_labels: playing music, playing sports
+   example_title: Cat & Dog
+ ---
+
+ # MetaCLIP model, large-sized version, patch resolution 14
+
+ MetaCLIP model trained on 400 million data points curated from CommonCrawl (CC). It was introduced in the paper [Demystifying CLIP Data](https://arxiv.org/abs/2309.16671) by Xu et al. and first released in [this repository](https://github.com/facebookresearch/MetaCLIP).
+
+ Disclaimer: The team releasing MetaCLIP did not write a model card for this model, so this model card has been written by the Hugging Face team.
+
+ ## Model description
+
+ The [Demystifying CLIP Data](https://arxiv.org/abs/2309.16671) paper aims to reveal CLIP's training data curation method, for which OpenAI never open-sourced its data preparation pipeline.
+
+ <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/clip_overview.jpg"
+ alt="drawing" width="600"/>
+
+ <small> CLIP high-level overview. Taken from the <a href="https://arxiv.org/abs/2103.00020">CLIP paper</a>. </small>
+
+ ## Intended uses & limitations
+
+ You can use the raw model for linking images with text in a shared embedding space. This enables zero-shot image classification, text-based image retrieval, image-based text retrieval, and similar tasks.
+
+ ### How to use
+
+ We refer to the [docs](https://huggingface.co/docs/transformers/main/en/model_doc/clip#usage); just replace the OpenAI checkpoint names with this model's hub id, as in the sketch below.
+
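As a concrete illustration of the note above, here is a minimal zero-shot classification sketch. It follows standard `transformers` CLIP usage rather than MetaCLIP-specific code, and it assumes this checkpoint is published under the hub id `facebook/metaclip-l14-400m` (inferred from this folder's path); the image URL is just a common example image.

```python
# Hedged sketch: standard transformers CLIP usage, assuming the hub id
# facebook/metaclip-l14-400m (inferred from the folder path above).
import requests
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("facebook/metaclip-l14-400m")
processor = CLIPProcessor.from_pretrained("facebook/metaclip-l14-400m")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(
    text=["a photo of a cat", "a photo of a dog"],
    images=image,
    return_tensors="pt",
    padding=True,
)
with torch.no_grad():
    outputs = model(**inputs)

# logits_per_image holds image-text similarity scores; softmax turns them
# into probabilities over the candidate labels.
probs = outputs.logits_per_image.softmax(dim=-1)
print(probs)
```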
+ ### BibTeX entry and citation info
+
+ ```bibtex
+ @misc{xu2023demystifying,
+   title={Demystifying CLIP Data},
+   author={Hu Xu and Saining Xie and Xiaoqing Ellen Tan and Po-Yao Huang and Russell Howes and Vasu Sharma and Shang-Wen Li and Gargi Ghosh and Luke Zettlemoyer and Christoph Feichtenhofer},
+   year={2023},
+   eprint={2309.16671},
+   archivePrefix={arXiv},
+   primaryClass={cs.CV}
+ }
+ ```
clip/facebook/metaclip-l14-400m/added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "<|endoftext|>": 49407,
+   "<|startoftext|>": 49406
+ }
clip/facebook/metaclip-l14-400m/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
clip/facebook/metaclip-l14-400m/preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": true,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_processor_type": "CLIPImageProcessor",
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "processor_class": "CLIPProcessor",
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 224
+   }
+ }
clip/facebook/metaclip-l14-400m/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<|startoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
clip/facebook/metaclip-l14-400m/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "49406": {
+       "content": "<|startoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "49407": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<|startoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": true,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 77,
+   "pad_token": "<|endoftext|>",
+   "processor_class": "CLIPProcessor",
+   "tokenizer_class": "CLIPTokenizer",
+   "tokenizer_file": "/Users/georgebredis/.cache/huggingface/hub/models--openai--clip-vit-base-patch32/snapshots/e6a30b603a447e251fdaca1c3056b2a16cdfebeb/tokenizer.json",
+   "unk_token": "<|endoftext|>"
+ }
clip/facebook/metaclip-l14-400m/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
clip/laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K/open_clip_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6509f07e6fc0da68f8e1ee881bf90803f0b053d2f7ed2013cc7c3a49ac4dd3db
+ size 1710639109
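The three lines above are a Git LFS pointer, not the weights themselves: `oid` is the SHA-256 digest of the real file and `size` is its byte length. A minimal sketch for checking a downloaded blob against such a pointer follows; the local filename is hypothetical.

```python
# Hedged sketch: verify a downloaded LFS object against its pointer's
# sha256 oid and byte size (streamed, so large files aren't held in memory).
import hashlib

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return size == expected_size and digest.hexdigest() == expected_oid

# Values taken from the pointer above; the local path is hypothetical.
ok = verify_lfs_object(
    "open_clip_pytorch_model.bin",
    "6509f07e6fc0da68f8e1ee881bf90803f0b053d2f7ed2013cc7c3a49ac4dd3db",
    1710639109,
)
print("checksum OK" if ok else "checksum mismatch")
```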
clip/laion/CLIP-ViT-L-14-laion2B-s32B-b82K/open_clip_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d129ed747e0ed53e82dfcc140382b51be66b56e6a9bdc3258afd2846e3bb019
+ size 1710517748
clip/openai/clip-vit-large-patch14/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1a17cdbe0f36fec524f5cafb1c261ea3bbbc13e346e0f74fc9eb0460dedd0d3
+ size 1710671599
clip/openai/clip-vit-large-patch14/tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f154e925c18270d662d28f6261523c2ff6e80f1f05292cb034db41d5951c7a4
+ size 1711114176