## How to Get Started with the Model
Load the tokenizer and model with 🤗 Transformers, then use the helper below to tag each token of a sentence with zero or more bias labels:

```python
import json

import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification

# Load model directly
tokenizer = AutoTokenizer.from_pretrained("bhavan2410/bias-lens-detection-model")
model = AutoModelForTokenClassification.from_pretrained("bhavan2410/bias-lens-detection-model")
model.eval()
model.to('cuda' if torch.cuda.is_available() else 'cpu')

# ids to labels we want to display
id2label = {
    0: "O",
    1: "B-STEREO",
    2: "I-STEREO",
    3: "B-GEN",
    4: "I-GEN",
    5: "B-UNFAIR",
    6: "I-UNFAIR",
    7: "B-EXCL",
    8: "I-EXCL",
    9: "B-FRAME",
    10: "I-FRAME",
    11: "B-ASSUMP",
    12: "I-ASSUMP",
}

def predict_ner_tags(sentence):
    inputs = tokenizer(sentence, return_tensors="pt", padding=True, truncation=True, max_length=128)
    input_ids = inputs['input_ids'].to(model.device)
    attention_mask = inputs['attention_mask'].to(model.device)

    with torch.no_grad():
        outputs = model(input_ids=input_ids, attention_mask=attention_mask)
        logits = outputs.logits
        # Multi-label prediction: apply a sigmoid per label and keep every
        # label whose probability exceeds the 0.3 threshold.
        probabilities = torch.sigmoid(logits)
        predicted_labels = (probabilities > 0.3).int()

    result = []
    tokens = tokenizer.convert_ids_to_tokens(input_ids[0])
    for i, token in enumerate(tokens):
        # Skip special tokens such as [CLS], [SEP], and [PAD]
        if token not in tokenizer.all_special_tokens:
            label_indices = (predicted_labels[0][i] == 1).nonzero(as_tuple=False).squeeze(-1)
            labels = [id2label[idx.item()] for idx in label_indices] if label_indices.numel() > 0 else ['O']
            result.append({"token": token, "labels": labels})

    return json.dumps(result, indent=4)
```
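As a minimal usage sketch (the input sentence here is an arbitrary illustration, not taken from the model's documentation):

```python
# Run the helper on an example sentence and print the per-token labels as JSON.
print(predict_ner_tags("Women are naturally better at caregiving than men."))
```

Each entry in the returned JSON pairs a WordPiece token with the list of labels from `id2label` whose sigmoid probability exceeded the threshold, or `["O"]` when no bias category fired for that token.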
Base model: google-bert/bert-base-uncased