Commit 75a84c7 (verified) · 0 parent(s)

Super-squash branch 'main' using huggingface_hub

Co-authored-by: Xenova <[email protected]>
Co-authored-by: RyanMullins <[email protected]>
- .gitattributes +42 -0
- README.md +493 -0
- added_tokens.json +3 -0
- config.json +71 -0
- generation_config.json +7 -0
- onnx/model.onnx +3 -0
- onnx/model.onnx_data +3 -0
- onnx/model_fp16.onnx +3 -0
- onnx/model_fp16.onnx_data +3 -0
- onnx/model_no_gather_q4.onnx +3 -0
- onnx/model_no_gather_q4.onnx_data +3 -0
- onnx/model_q4.onnx +3 -0
- onnx/model_q4.onnx_data +3 -0
- onnx/model_q4f16.onnx +3 -0
- onnx/model_q4f16.onnx_data +3 -0
- onnx/model_quantized.onnx +3 -0
- onnx/model_quantized.onnx_data +3 -0
- special_tokens_map.json +33 -0
- tokenizer.json +3 -0
- tokenizer.model +3 -0
- tokenizer_config.json +0 -0
.gitattributes
ADDED
@@ -0,0 +1,42 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
onnx/model.onnx_data filter=lfs diff=lfs merge=lfs -text
onnx/model_fp16.onnx_data filter=lfs diff=lfs merge=lfs -text
onnx/model_q4.onnx_data filter=lfs diff=lfs merge=lfs -text
onnx/model_q4f16.onnx_data filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text
onnx/model_quantized.onnx_data filter=lfs diff=lfs merge=lfs -text
onnx/model_no_gather_q4.onnx_data filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,493 @@
---
license: gemma
base_model:
- google/embeddinggemma-300M
pipeline_tag: sentence-similarity
library_name: transformers.js
---

# EmbeddingGemma model card

**Model Page**: [EmbeddingGemma](https://ai.google.dev/gemma/docs/embeddinggemma)

**Resources and Technical Documentation**:

* [Responsible Generative AI Toolkit](https://ai.google.dev/responsible)
* [EmbeddingGemma on Kaggle](https://www.kaggle.com/models/google/embeddinggemma/)
* [EmbeddingGemma on Vertex Model Garden](https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/embeddinggemma)

**Terms of Use**: [Terms](https://ai.google.dev/gemma/terms)

**Authors**: Google DeepMind

## Model Information

### Description

EmbeddingGemma is a 300M-parameter open embedding model from Google that is state-of-the-art for its size, built from Gemma 3 (with T5Gemma initialization) and the same research and technology used to create the Gemini models. EmbeddingGemma produces vector representations of text, making it well-suited for search and retrieval tasks, including classification, clustering, and semantic similarity search. It was trained on data in 100+ spoken languages.

Its small size and on-device focus make it possible to deploy the model in environments with limited resources, such as mobile phones, laptops, or desktops, democratizing access to state-of-the-art AI models and helping foster innovation for everyone.

### Inputs and outputs

- **Input:**
  - Text string, such as a question, a prompt, or a document to be embedded
  - Maximum input context length of 2048 tokens

- **Output:**
  - Numerical vector representations of input text data
  - Output embedding dimension size of 768, with smaller options available (512, 256, or 128) via Matryoshka Representation Learning (MRL). MRL allows users to truncate the output embedding of size 768 to their desired size and then re-normalize it for efficient and accurate representation (a minimal sketch follows below).
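To make the MRL step concrete, here is a minimal NumPy sketch of truncate-then-renormalize. The random vector is only a stand-in for a real 768-d EmbeddingGemma output, and `target_dim` is an illustrative choice, not a required setting.

```py
import numpy as np

# Stand-in for a real 768-d embedding; MRL-trained models front-load
# the most informative components, so a prefix remains meaningful.
embedding = np.random.randn(768).astype(np.float32)

target_dim = 256  # any of the supported sizes: 512, 256, or 128
truncated = embedding[:target_dim]

# Re-normalize to unit length so dot-product/cosine similarity still works.
truncated = truncated / np.linalg.norm(truncated)
print(truncated.shape, float(np.linalg.norm(truncated)))  # (256,) 1.0
```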
### Usage

These model weights are designed to be used with [Transformers.js](https://huggingface.co/docs/transformers.js/en/index).

**NOTE**: EmbeddingGemma activations do not support `float16` or its derivatives. Please use `float32`, `q8`, or `q4` as appropriate for your hardware.

#### Transformers.js in JavaScript

```js
import { AutoModel, AutoTokenizer, matmul } from "@huggingface/transformers";

// Download from the 🤗 Hub
const model_id = "onnx-community/embeddinggemma-300M-ONNX";
const tokenizer = await AutoTokenizer.from_pretrained(model_id);
const model = await AutoModel.from_pretrained(model_id, {
  dtype: "fp32", // Options: "fp32" | "q8" | "q4".
});

// Run inference with queries and documents
const prefixes = {
  query: "task: search result | query: ",
  document: "title: none | text: ",
};
const query = prefixes.query + "Which planet is known as the Red Planet?";
const documents = [
  "Venus is often called Earth's twin because of its similar size and proximity.",
  "Mars, known for its reddish appearance, is often referred to as the Red Planet.",
  "Jupiter, the largest planet in our solar system, has a prominent red spot.",
  "Saturn, famous for its rings, is sometimes mistaken for the Red Planet.",
].map((x) => prefixes.document + x);

const inputs = await tokenizer([query, ...documents], { padding: true });
const { sentence_embedding } = await model(inputs);

// Compute similarities to determine a ranking
const scores = await matmul(sentence_embedding, sentence_embedding.transpose(1, 0));
const similarities = scores.tolist()[0].slice(1);
console.log(similarities);
// [ 0.30109718441963196, 0.6358831524848938, 0.4930494725704193, 0.48887503147125244 ]

// Convert similarities to a ranking
const ranking = similarities.map((score, index) => ({ index, score })).sort((a, b) => b.score - a.score);
console.log(ranking);
// [
//   { index: 1, score: 0.6358831524848938 },
//   { index: 2, score: 0.4930494725704193 },
//   { index: 3, score: 0.48887503147125244 },
//   { index: 0, score: 0.30109718441963196 }
// ]
```
#### Using the ONNX Runtime in Python

```py
from huggingface_hub import hf_hub_download
import onnxruntime as ort
from transformers import AutoTokenizer

# Download from the 🤗 Hub
model_id = "onnx-community/embeddinggemma-300M-ONNX"
model_path = hf_hub_download(model_id, subfolder="onnx", filename="model.onnx")  # Download graph
hf_hub_download(model_id, subfolder="onnx", filename="model.onnx_data")  # Download weights
session = ort.InferenceSession(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Run inference with queries and documents
prefixes = {
    "query": "task: search result | query: ",
    "document": "title: none | text: ",
}
query = prefixes["query"] + "Which planet is known as the Red Planet?"
documents = [
    "Venus is often called Earth's twin because of its similar size and proximity.",
    "Mars, known for its reddish appearance, is often referred to as the Red Planet.",
    "Jupiter, the largest planet in our solar system, has a prominent red spot.",
    "Saturn, famous for its rings, is sometimes mistaken for the Red Planet.",
]
documents = [prefixes["document"] + x for x in documents]

inputs = tokenizer([query] + documents, padding=True, return_tensors="np")

_, sentence_embedding = session.run(None, inputs.data)
print(sentence_embedding.shape)  # (5, 768)

# Compute similarities to determine a ranking
query_embeddings = sentence_embedding[0]
document_embeddings = sentence_embedding[1:]
similarities = query_embeddings @ document_embeddings.T
print(similarities)  # [0.30109745 0.635883   0.49304956 0.48887485]

# Convert similarities to a ranking
ranking = similarities.argsort()[::-1]
print(ranking)  # [1 2 3 0]
```
## Model Data

### Training Dataset

This model was trained on a dataset of text data that includes a wide variety of sources, totaling approximately 320 billion tokens. Here are the key components:

- **Web Documents**: A diverse collection of web text ensures the model is exposed to a broad range of linguistic styles, topics, and vocabulary. The training dataset includes content in over 100 languages.
- **Code and Technical Documents**: Exposing the model to code and technical documentation helps it learn the structure and patterns of programming languages and specialized scientific content, which improves its understanding of code and technical questions.
- **Synthetic and Task-Specific Data**: Synthetic training data helps teach the model specific skills. This includes curated data for tasks like information retrieval, classification, and sentiment analysis, which helps to fine-tune its performance for common embedding applications.

The combination of these diverse data sources is crucial for training a powerful multilingual embedding model that can handle a wide variety of tasks and data formats.

### Data Preprocessing

Here are the key data cleaning and filtering methods applied to the training data:

- CSAM Filtering: Rigorous CSAM (Child Sexual Abuse Material) filtering was applied at multiple stages in the data preparation process to ensure the exclusion of harmful and illegal content.
- Sensitive Data Filtering: As part of making Gemma pre-trained models safe and reliable, automated techniques were used to filter out certain personal information and other sensitive data from training sets.
- Additional methods: Filtering based on content quality and safety in line with [our policies](https://ai.google/static/documents/ai-responsibility-update-published-february-2025.pdf).
## Model Development

### Hardware

EmbeddingGemma was trained using the latest generation of [Tensor Processing Unit (TPU)](https://cloud.google.com/tpu/docs/intro-to-tpu) hardware (TPUv5e); for more details, refer to the [Gemma 3 model card](https://ai.google.dev/gemma/docs/core/model_card_3).

### Software

Training was done using [JAX](https://github.com/jax-ml/jax) and [ML Pathways](https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture/). For more details, refer to the [Gemma 3 model card](https://ai.google.dev/gemma/docs/core/model_card_3).
## Evaluation

### Benchmark Results

The model was evaluated against a large collection of different datasets and metrics to cover different aspects of text understanding.

#### Full Precision Checkpoint

**MTEB (Multilingual, v2)**

| Dimensionality | Mean (Task) | Mean (TaskType) |
|----------------|-------------|-----------------|
| 768d           | 61.15       | 54.31           |
| 512d           | 60.71       | 53.89           |
| 256d           | 59.68       | 53.01           |
| 128d           | 58.23       | 51.77           |

**MTEB (English, v2)**

| Dimensionality | Mean (Task) | Mean (TaskType) |
|----------------|-------------|-----------------|
| 768d           | 68.36       | 64.15           |
| 512d           | 67.80       | 63.59           |
| 256d           | 66.89       | 62.94           |
| 128d           | 65.09       | 61.56           |

**MTEB (Code, v1)**

| Dimensionality | Mean (Task) | Mean (TaskType) |
|----------------|-------------|-----------------|
| 768d           | 68.76       | 68.76           |
| 512d           | 68.48       | 68.48           |
| 256d           | 66.74       | 66.74           |
| 128d           | 62.96       | 62.96           |
#### QAT Checkpoints

**MTEB (Multilingual, v2)**

| Quant config (dimensionality) | Mean (Task) | Mean (TaskType) |
|-------------------------------|-------------|-----------------|
| Q4_0 (768d)                   | 60.62       | 53.61           |
| Q8_0 (768d)                   | 60.93       | 53.95           |
| Mixed Precision* (768d)       | 60.69       | 53.82           |

**MTEB (English, v2)**

| Quant config (dimensionality) | Mean (Task) | Mean (TaskType) |
|-------------------------------|-------------|-----------------|
| Q4_0 (768d)                   | 67.91       | 63.64           |
| Q8_0 (768d)                   | 68.13       | 63.85           |
| Mixed Precision* (768d)       | 67.95       | 63.83           |

**MTEB (Code, v1)**

| Quant config (dimensionality) | Mean (Task) | Mean (TaskType) |
|-------------------------------|-------------|-----------------|
| Q4_0 (768d)                   | 67.99       | 67.99           |
| Q8_0 (768d)                   | 68.70       | 68.70           |
| Mixed Precision* (768d)       | 68.03       | 68.03           |

Note: QAT models are evaluated after quantization.

\* Mixed Precision refers to per-channel quantization with int4 for embeddings, feedforward, and projection layers, and int8 for attention (e4_a8_f4_p4).
### Prompt Instructions

EmbeddingGemma can generate optimized embeddings for various use cases (such as document retrieval, question answering, and fact verification) or for specific input types (either a query or a document) using prompts that are prepended to the input strings.

Query prompts follow the form `task: {task description} | query: `, where the task description varies by use case; the default task description is `search result`. Document-style prompts follow the form `title: {title | "none"} | text: `, where the title is either `none` (the default) or the actual title of the document. Note that providing a title, if available, will improve model performance for document prompts but may require manual formatting.

Use the following prompts based on your use case and input data type. These may already be available in the EmbeddingGemma configuration in your modeling framework of choice. A small helper for building these prompts is sketched after the table.

| Use Case (task type enum) | Description | Recommended Prompt |
|---|---|---|
| Retrieval (Query) | Used to generate embeddings that are optimized for document search or information retrieval | `task: search result \| query: {content}` |
| Retrieval (Document) | (same as above) | `title: {title \| "none"} \| text: {content}` |
| Question Answering | (same as above) | `task: question answering \| query: {content}` |
| Fact Verification | (same as above) | `task: fact checking \| query: {content}` |
| Classification | Used to generate embeddings that are optimized to classify texts according to preset labels | `task: classification \| query: {content}` |
| Clustering | Used to generate embeddings that are optimized to cluster texts based on their similarities | `task: clustering \| query: {content}` |
| Semantic Similarity | Used to generate embeddings that are optimized to assess text similarity. This is not intended for retrieval use cases. | `task: sentence similarity \| query: {content}` |
| Code Retrieval | Used to retrieve a code block based on a natural language query, such as *sort an array* or *reverse a linked list*. Embeddings of the code blocks are computed using `retrieval_document`. | `task: code retrieval \| query: {content}` |
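To make the prompt formats above concrete, here is a minimal Python sketch of prompt construction. The `build_query` and `build_document` helper names are illustrative only, not part of any library API.

```py
# Illustrative helpers that prepend the recommended EmbeddingGemma prefixes.

def build_query(content: str, task: str = "search result") -> str:
    """Format a query string with its task prefix (default: retrieval)."""
    return f"task: {task} | query: {content}"

def build_document(content: str, title: str = "none") -> str:
    """Format a document string, optionally with its real title."""
    return f"title: {title} | text: {content}"

print(build_query("Which planet is known as the Red Planet?"))
# task: search result | query: Which planet is known as the Red Planet?
print(build_query("Mars is the fourth planet from the Sun.", task="fact checking"))
# task: fact checking | query: Mars is the fourth planet from the Sun.
print(build_document("Mars is often referred to as the Red Planet.", title="Mars"))
# title: Mars | text: Mars is often referred to as the Red Planet.
```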
## Usage and Limitations

These models have certain limitations that users should be aware of.

### Intended Usage

Open embedding models have a wide range of applications across various industries and domains. The following list of potential uses is not comprehensive. The purpose of this list is to provide contextual information about the possible use-cases that the model creators considered as part of model training and development.

- **Semantic Similarity**: Embeddings optimized to assess text similarity, such as recommendation systems and duplicate detection
- **Classification**: Embeddings optimized to classify texts according to preset labels, such as sentiment analysis and spam detection
- **Clustering**: Embeddings optimized to cluster texts based on their similarities, such as document organization, market research, and anomaly detection
- **Retrieval**
  - **Document**: Embeddings optimized for document search, such as indexing articles, books, or web pages for search
  - **Query**: Embeddings optimized for general search queries, such as custom search
  - **Code Query**: Embeddings optimized for retrieval of code blocks based on natural language queries, such as code suggestions and search
- **Question Answering**: Embeddings for questions in a question-answering system, optimized for finding documents that answer the question, such as a chatbot.
- **Fact Verification**: Embeddings for statements that need to be verified, optimized for retrieving documents that contain evidence supporting or refuting the statement, such as automated fact-checking systems.

### Limitations

- Training Data
  - The quality and diversity of the training data significantly influence the model's capabilities. Biases or gaps in the training data can lead to limitations in the model's responses.
  - The scope of the training dataset determines the subject areas the model can handle effectively.

- Language Ambiguity and Nuance
  - Natural language is inherently complex. Models might struggle to grasp subtle nuances, sarcasm, or figurative language.

### Ethical Considerations and Risks

Risks identified and mitigations:

- **Perpetuation of biases**: It's encouraged to perform continuous monitoring (using evaluation metrics, human review) and to explore de-biasing techniques during model training, fine-tuning, and other use cases.
- **Misuse for malicious purposes**: Technical limitations and developer and end-user education can help mitigate against malicious applications of embeddings. Educational resources and reporting mechanisms for users to flag misuse are provided. Prohibited uses of Gemma models are outlined in the [Gemma Prohibited Use Policy](https://ai.google.dev/gemma/prohibited_use_policy).
- **Privacy violations**: Models were trained on data filtered to remove certain personal information and other sensitive data. Developers are encouraged to adhere to privacy regulations by using privacy-preserving techniques.

### Benefits

At the time of release, this family of models provides high-performance open embedding model implementations designed from the ground up for responsible AI development. Using the benchmark evaluation metrics described in this document, these models have shown superior performance to other, comparably sized open model alternatives.
added_tokens.json
ADDED
@@ -0,0 +1,3 @@
{
  "<image_soft_token>": 262144
}
config.json
ADDED
@@ -0,0 +1,71 @@
{
  "_sliding_window_pattern": 6,
  "architectures": [
    "Gemma3TextModel"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "attn_logit_softcapping": null,
  "bos_token_id": 2,
  "dtype": "float32",
  "eos_token_id": 1,
  "final_logit_softcapping": null,
  "head_dim": 256,
  "hidden_activation": "gelu_pytorch_tanh",
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 1152,
  "layer_types": [
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention"
  ],
  "max_position_embeddings": 2048,
  "model_type": "gemma3_text",
  "num_attention_heads": 3,
  "num_hidden_layers": 24,
  "num_key_value_heads": 1,
  "pad_token_id": 0,
  "query_pre_attn_scalar": 256,
  "rms_norm_eps": 1e-06,
  "rope_local_base_freq": 10000.0,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": 512,
  "transformers_version": "4.57.0.dev0",
  "use_bidirectional_attention": true,
  "use_cache": true,
  "vocab_size": 262144,
  "transformers.js_config": {
    "use_external_data_format": {
      "model.onnx": 1,
      "model_fp16.onnx": 1,
      "model_quantized.onnx": 1,
      "model_q4.onnx": 1,
      "model_q4f16.onnx": 1,
      "model_no_gather_q4.onnx": 1
    },
    "kv_cache_dtype": false
  }
}
generation_config.json
ADDED
@@ -0,0 +1,7 @@
{
  "cache_implementation": "hybrid",
  "do_sample": true,
  "top_k": 64,
  "top_p": 0.95,
  "transformers_version": "4.57.0.dev0"
}
onnx/model.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ea91fd315a7c152d427d231746f0f811a1ac93beaba656abfdf2b24e091265e4
size 479932
onnx/model.onnx_data
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ef835ae565d8695236652475903078e8ed794c7c35faf1164d78ec3238e8a88d
size 1234521088
onnx/model_fp16.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dcfaf21ff7cae91af9295366ac0d7352efcadeaf7deefb98f82d5056502d0bf2
size 655263
onnx/model_fp16.onnx_data
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1cd839755aa8e24d5af7f16ef275b12d717a4401bb009099b8c17e4156d3d5d5
size 617434112
onnx/model_no_gather_q4.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f2bae6947463c5eec82bd8e9a66483b40cef40fdf2b874f3e180f3ea88be34f0
size 529476
onnx/model_no_gather_q4.onnx_data
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2c315bb5ff8170ff0a0d6878d4bf852c0f14534ac80bb7f42645bafdcb4ce230
size 194628608
onnx/model_q4.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ad1dfee81a70f7944b9b9d1cc6e48075b832881cf33fab2f2b248be78f3f0043
size 519322
onnx/model_q4.onnx_data
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:599962c3143b040de2dd05e5975be3e9091dd067cacc6a8f7186e3203bab9e02
size 196725760
onnx/model_q4f16.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4df4a2a44253865800b8882a497badf67c2707a487267460813f78da339c753f
size 705221
onnx/model_q4f16.onnx_data
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c9cc456a345e6aa9bc5fb75b54c10b3e0edbb4f80708f749dc4c45dbed5b6edf
size 175410176
onnx/model_quantized.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:172efde319fe1542dc41f31be6154910b05b78f7a861c265c4600eec906bd6d8
size 567874
onnx/model_quantized.onnx_data
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:705626e28e4c23c82ade34566b4197d97f534c12275fa406dfb71e9937d388c0
size 308890624
special_tokens_map.json
ADDED
@@ -0,0 +1,33 @@
{
  "boi_token": "<start_of_image>",
  "bos_token": {
    "content": "<bos>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eoi_token": "<end_of_image>",
  "eos_token": {
    "content": "<eos>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "image_token": "<image_soft_token>",
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4dda02faaf32bc91031dc8c88457ac272b00c1016cc679757d1c441b248b9c47
size 20323312
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
size 4689074
tokenizer_config.json
ADDED
The diff for this file is too large to render.