import gradio as gr
from transformers import AutoTokenizer, AutoModel
import torch
import torch.nn.functional as F

# Load model and tokenizer once at startup
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-Embedding-0.6B")
model = AutoModel.from_pretrained("Qwen/Qwen3-Embedding-0.6B")
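
# Encode a single piece of text into one sentence-level embedding vector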
def get_embedding(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():
        outputs = model(**inputs)
    # Qwen3-Embedding is decoder-only and has no [CLS] token, so pool the hidden
    # state of the last token (the final position of this single, unpadded input)
    return outputs.last_hidden_state[:, -1, :]
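
# Validate the inputs, embed the reference and each comparison sentence, and
# report the cosine similarity between the reference and every comparison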
def compare_sentences(reference, comparisons):
    if len(reference) > 250:
        return "Error: Reference exceeds 250 character limit."
    comparison_list = [s.strip() for s in comparisons.strip().split('\n') if s.strip()]
    if not comparison_list:
        return "Error: No comparison sentences provided."
    if any(len(s) > 250 for s in comparison_list):
        return "Error: One or more comparison sentences exceed 250 characters."
    ref_emb = get_embedding(reference)
    comp_embs = torch.cat([get_embedding(s) for s in comparison_list], dim=0)
    similarities = F.cosine_similarity(ref_emb, comp_embs).tolist()
    results = "\n".join([f"Similarity with: \"{s}\"\n→ {round(score, 4)}" for s, score in zip(comparison_list, similarities)])
    return results
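
# Wire the comparison function into a simple Gradio UI with two text inputs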
demo = gr.Interface(
    fn=compare_sentences,
    inputs=[
        gr.Textbox(label="Reference Sentence (max 250 characters)", lines=2, placeholder="Type the reference sentence here..."),
        gr.Textbox(label="Comparison Sentences (one per line, each max 250 characters)", lines=8, placeholder="Type comparison sentences here, one per line..."),
    ],
    outputs="text",
    title="Qwen3 Embedding Comparison Demo",
    description="Enter a reference sentence and multiple comparison sentences (one per line). The model computes the cosine similarity between the reference and each comparison."
)

if __name__ == "__main__":
    demo.launch()
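
# A minimal sketch of querying the running app programmatically, assuming the
# default local URL and the gradio_client package (neither is part of this file):
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860")
#   result = client.predict(
#       "The cat sat on the mat.",                        # reference sentence
#       "A cat is on the mat.\nDogs bark at strangers.",  # comparison sentences, one per line
#       api_name="/predict",
#   )
#   print(result)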