|
import gradio as gr |
|
import pandas as pd
|
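# Embedding models compared in the "easy" regime (links render as HTML in the tables).
# The rate-limited Cohere and GCP-Vertex APIs appear only here, not in the "hard" regime below.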
embedding_models = [ |
|
'<a href="https://docs.aws.amazon.com/bedrock/latest/userguide/titan-multiemb-models.html">Amazon-Titan-MultiModal</a>', |
|
'<a href="https://huggingface.co/jinaai/jina-clip-v1">Jina-V1-CLIP</a>', |
|
'<a href="https://cohere.com/blog/introducing-embed-v3">Cohere-embedding-v3</a>', |
|
'<a href="https://cloud.google.com/vertex-ai">GCP-Vertex</a>', |
|
'<a href="https://huggingface.co/timm/ViT-SO400M-14-SigLIP">ViT-SO400M-14-SigLip</a>', |
|
'<a href="https://huggingface.co/timm/ViT-B-16-SigLIP">ViT-B-16-SigLip</a>', |
|
'<a href="https://huggingface.co/timm/ViT-L-16-SigLIP-384">ViT-L-16-SigLip</a>', |
|
'<a href="https://huggingface.co/Marqo/marqo-ecommerce-embeddings-B">Marqo-Ecommerce-B</a>', |
|
'<a href="https://huggingface.co/Marqo/marqo-ecommerce-embeddings-L">Marqo-Ecommerce-L</a>' |
|
] |
|
|
|
|
|
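# Easy regime: Google Shopping text-to-image retrieval (one score per model, in embedding_models order).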
gs_text2_image_1m_mAP = [0.694, 0.480, 0.358, 0.740, 0.792, 0.701, 0.754, 0.842, 0.879] |
|
gs_text2_image_1m_Recall = [0.868, 0.638, 0.515, 0.910, 0.935, 0.87, 0.907, 0.961, 0.971] |
|
gs_text2_image_1m_MRR = [0.693, 0.480, 0.358, 0.740, 0.792, 0.701, 0.754, 0.842, 0.879] |
|
gs_text2_image_1m_ndcg = [0.733, 0.511, 0.389, 0.779, 0.825, 0.739, 0.789, 0.871, 0.901] |
|
|
|
gs_text2_image_1m_data = { |
|
"Embedding Model": embedding_models, |
|
"mAP": gs_text2_image_1m_mAP, |
|
"R@10": gs_text2_image_1m_Recall, |
|
"MRR": gs_text2_image_1m_MRR, |
|
"nDCG@10": gs_text2_image_1m_ndcg |
|
} |
|
gs_text2_image_1m_df = pd.DataFrame(gs_text2_image_1m_data).sort_values(by="mAP", ascending=False).reset_index(drop=True)
|
|
|
|
|
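# Easy regime: Google Shopping category-to-image retrieval.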
gs_category2_image_1m_mAP = [0.308, 0.175, 0.136, 0.417, 0.423, 0.347, 0.392, 0.479, 0.515] |
|
gs_category2_image_1m_Precision = [0.231, 0.122, 0.110, 0.298, 0.302, 0.252, 0.281, 0.336, 0.358] |
|
gs_category2_image_1m_MRR = [0.558, 0.369, 0.315, 0.636, 0.644, 0.594, 0.627, 0.744, 0.764] |
|
gs_category2_image_1m_ndcg = [0.377, 0.229, 0.178, 0.481, 0.487, 0.414, 0.458, 0.558, 0.590] |
|
|
|
gs_category2_image_1m_data = { |
|
"Embedding Model": embedding_models, |
|
"mAP": gs_category2_image_1m_mAP, |
|
"P@10": gs_category2_image_1m_Precision, |
|
"MRR": gs_category2_image_1m_MRR, |
|
"nDCG@10": gs_category2_image_1m_ndcg |
|
} |
|
gs_category2_image_1m_df = pd.DataFrame(gs_category2_image_1m_data).sort_values(by="mAP", ascending=False).reset_index(drop=True)
|
|
|
|
|
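# Easy regime: Amazon Products text-to-image retrieval.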
ap_text2_image_3m_mAP = [0.762, 0.530, 0.433, 0.808, 0.860, 0.797, 0.842, 0.897, 0.928] |
|
ap_text2_image_3m_Recall = [0.889, 0.699, 0.597, 0.933, 0.954, 0.917, 0.940, 0.967, 0.978] |
|
ap_text2_image_3m_MRR = [0.763, 0.530, 0.433, 0.808, 0.860, 0.797, 0.842, 0.897, 0.928] |
|
ap_text2_image_3m_ndcg = [0.791, 0.565, 0.465, 0.837, 0.882, 0.825, 0.865, 0.914, 0.940] |
|
|
|
ap_text2_image_3m_data = { |
|
"Embedding Model": embedding_models, |
|
"mAP": ap_text2_image_3m_mAP, |
|
"R@10": ap_text2_image_3m_Recall, |
|
"MRR": ap_text2_image_3m_MRR, |
|
"nDCG@10": ap_text2_image_3m_ndcg |
|
} |
|
ap_text2_image_3m_df = pd.DataFrame(ap_text2_image_3m_data).sort_values(by="mAP", ascending=False).reset_index(drop=True)
|
|
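# Embedding models compared in the "hard" (4M-product) regime; the rate-limited Cohere and
# GCP-Vertex models were only evaluated on the smaller "easy" datasets.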
hard_embedding_models = [ |
|
'<a href="https://docs.aws.amazon.com/bedrock/latest/userguide/titan-multiemb-models.html">Amazon-Titan-MultiModal</a>', |
|
'<a href="https://huggingface.co/jinaai/jina-clip-v1">Jina-V1-CLIP</a>', |
|
'<a href="https://huggingface.co/timm/ViT-SO400M-14-SigLIP">ViT-SO400M-14-SigLip</a>', |
|
'<a href="https://huggingface.co/timm/ViT-B-16-SigLIP">ViT-B-16-SigLip</a>', |
|
'<a href="https://huggingface.co/timm/ViT-L-16-SigLIP-384">ViT-L-16-SigLip</a>', |
|
'<a href="https://huggingface.co/Marqo/marqo-ecommerce-embeddings-B">Marqo-Ecommerce-B</a>', |
|
'<a href="https://huggingface.co/Marqo/marqo-ecommerce-embeddings-L">Marqo-Ecommerce-L</a>' |
|
] |
|
|
|
|
|
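# Hard regime: Google Shopping text-to-image retrieval (1M products).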
hard_gs_text2_image_1m_mAP = [0.475, 0.285, 0.573, 0.476, 0.540, 0.623, 0.682] |
|
hard_gs_text2_image_1m_Recall = [0.648, 0.402, 0.763, 0.660, 0.722, 0.832, 0.878] |
|
hard_gs_text2_image_1m_MRR = [0.475, 0.285, 0.574, 0.477, 0.540, 0.624, 0.683] |
|
hard_gs_text2_image_1m_ndcg = [0.509, 0.306, 0.613, 0.513, 0.577, 0.668, 0.726] |
|
|
|
hard_gs_text2_image_1m_data = { |
|
"Embedding Model": hard_embedding_models, |
|
"mAP": hard_gs_text2_image_1m_mAP, |
|
"R@10": hard_gs_text2_image_1m_Recall, |
|
"MRR": hard_gs_text2_image_1m_MRR, |
|
"nDCG@10": hard_gs_text2_image_1m_ndcg |
|
} |
|
hard_gs_text2_image_1m_df = pd.DataFrame(hard_gs_text2_image_1m_data).sort_values(by="mAP", ascending=False).reset_index(drop=True)
|
|
|
|
|
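# Hard regime: Google Shopping category-to-image retrieval (1M products).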
hard_gs_category2_image_1m_mAP = [0.246, 0.123, 0.352, 0.277, 0.324, 0.423, 0.463] |
|
hard_gs_category2_image_1m_Precision = [0.429, 0.275, 0.516, 0.458, 0.497, 0.629, 0.652] |
|
hard_gs_category2_image_1m_MRR = [0.642, 0.504, 0.707, 0.660, 0.687, 0.810, 0.822] |
|
hard_gs_category2_image_1m_ndcg = [0.446, 0.294, 0.529, 0.473, 0.509, 0.644, 0.666] |
|
|
|
hard_gs_category2_image_1m_data = { |
|
"Embedding Model": hard_embedding_models, |
|
"mAP": hard_gs_category2_image_1m_mAP, |
|
"P@10": hard_gs_category2_image_1m_Precision, |
|
"MRR": hard_gs_category2_image_1m_MRR, |
|
"nDCG@10": hard_gs_category2_image_1m_ndcg |
|
} |
|
hard_gs_category2_image_1m_df = pd.DataFrame(hard_gs_category2_image_1m_data).sort_values(by="mAP", ascending=False).reset_index(drop=True)
|
|
|
|
|
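# Hard regime: Amazon Products text-to-image retrieval (3M products).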
hard_ap_text2_image_3m_mAP = [0.456, 0.265, 0.560, 0.480, 0.544, 0.592, 0.658] |
|
hard_ap_text2_image_3m_Recall = [0.627, 0.378, 0.742, 0.650, 0.715, 0.795, 0.854] |
|
hard_ap_text2_image_3m_MRR = [0.457, 0.266, 0.564, 0.484, 0.548, 0.597, 0.663] |
|
hard_ap_text2_image_3m_ndcg = [0.491, 0.285, 0.599, 0.515, 0.580, 0.637, 0.703] |
|
|
|
hard_ap_text2_image_3m_data = { |
|
"Embedding Model": hard_embedding_models, |
|
"mAP": hard_ap_text2_image_3m_mAP, |
|
"R@10": hard_ap_text2_image_3m_Recall, |
|
"MRR": hard_ap_text2_image_3m_MRR, |
|
"nDCG@10": hard_ap_text2_image_3m_ndcg |
|
} |
|
hard_ap_text2_image_3m_df = pd.DataFrame(hard_ap_text2_image_3m_data).sort_values(by="mAP", ascending=False).reset_index(drop=True)
|
|
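# Gradio UI: intro text, then one results table per task (hard regime first, then easy).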
with gr.Blocks(css=""" |
|
.gradio-container { |
|
display: flex; |
|
justify-content: center; |
|
align-items: center; |
|
min-height: 100vh; |
|
flex-direction: column; |
|
} |
|
""") as demo: |
|
gr.Markdown("# Ecommerce Embedding Model Benchmarks") |
|
|
|
gr.Markdown("This Space contains benchmark results conducted as part of the release of our ecommerce embedding models: [**`Marqo-Ecommerce-L`**](https://huggingface.co/Marqo/marqo-ecommerce-embeddings-L) and [**`Marqo-Ecommerce-B`**](https://huggingface.co/Marqo/marqo-ecommerce-embeddings-B). ") |
|
gr.Markdown("Our benchmarking process was divided into two distinct regimes, each using different datasets of ecommerce product listings: **marqo-ecommerce-hard** and **marqo-ecommerce-easy**. Both datasets contained product images and text and only differed in size. The \"easy\" dataset is approximately 10-30 times smaller (200k vs 4M products), and designed to accommodate rate-limited models, specifically Cohere-Embeddings-v3 and GCP-Vertex (with limits of 0.66 rps and 2 rps respectively). The \"hard\" dataset represents the true challenge, since it contains four million ecommerce product listings and is more representative of real-world ecommerce search scenarios.") |
|
gr.Markdown('Within both regimes, the models were benchmarked on three different tasks:')
|
gr.Markdown('- **Google Shopping Text-to-Image**') |
|
gr.Markdown('- **Google Shopping Category-to-Image**') |
|
gr.Markdown('- **Amazon Products Text-to-Image**') |
|
gr.Markdown('As part of this launch, we also released two evaluation datasets: [`Marqo/google-shopping-general-eval`](https://huggingface.co/datasets/Marqo/google-shopping-general-eval) and [`Marqo/amazon-products-eval`](https://huggingface.co/datasets/Marqo/amazon-products-eval).') |
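# Hedged example rendered in the UI. It assumes both datasets load with their default
# configuration through the `datasets` library; check the dataset cards for specifics.
gr.Markdown(
    'Both evaluation datasets can be pulled straight from the Hub, for example:\n'
    '```python\n'
    'from datasets import load_dataset\n'
    '\n'
    'gs_eval = load_dataset("Marqo/google-shopping-general-eval")\n'
    'ap_eval = load_dataset("Marqo/amazon-products-eval")\n'
    '```'
)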
|
gr.Markdown('For more information on these models, benchmark results, and how you can run these evaluations yourself, visit our [blog post](https://www.marqo.ai/blog/introducing-marqos-ecommerce-embedding-models).') |
|
|
|
|
|
gr.Markdown('## Marqo-Ecommerce-Hard') |
|
gr.Markdown('### Google Shopping Text-to-Image (1M products)')
|
gr.Dataframe(value=hard_gs_text2_image_1m_df, headers="keys", interactive=True, datatype=["html", "number", "number", "number", "number"]) |
|
|
|
gr.Markdown('### Google Shopping Category-to-Image (1M products)')
|
gr.Dataframe(value=hard_gs_category2_image_1m_df, headers="keys", interactive=True, datatype=["html", "number", "number", "number", "number"]) |
|
|
|
gr.Markdown('### Amazon Products Text-to-Image (3M products)')
|
gr.Dataframe(value=hard_ap_text2_image_3m_df, headers="keys", interactive=True, datatype=["html", "number", "number", "number", "number"]) |
|
|
|
|
|
gr.Markdown('## Marqo-Ecommerce-Easy') |
|
gr.Markdown('### Google Shopping Text-to-Image')
|
gr.Dataframe(value=gs_text2_image_1m_df, headers="keys", interactive=True, datatype=["html", "number", "number", "number", "number"]) |
|
|
|
gr.Markdown('### Google Shopping Category-to-Image')
|
gr.Dataframe(value=gs_category2_image_1m_df, headers="keys", interactive=True, datatype=["html", "number", "number", "number", "number"]) |
|
|
|
gr.Markdown('### Amazon Products Text-to-Image')
|
gr.Dataframe(value=ap_text2_image_3m_df, headers="keys", interactive=True, datatype=["html", "number", "number", "number", "number"]) |
|
|
|
demo.launch() |
|
|