Update README.md
README.md
CHANGED
@@ -109,6 +109,31 @@ print( result)
[{'label': 'paraphrase', 'score': 0.9801033139228821}, {'label': 'not_paraphrase', 'score': 0.9302119016647339}]
```

Using `AutoTokenizer` and `AutoModelForSequenceClassification`:

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load the fine-tuned paraphrase classifier and its tokenizer
tokenizer = AutoTokenizer.from_pretrained("azherali/bert_paraphrase")
model = AutoModelForSequenceClassification.from_pretrained("azherali/bert_paraphrase")

# Example sentences
sent1 = "The quick brown fox jumps over the lazy dog."
sent2 = "A fast brown fox leaps over a lazy dog."

# Encode the sentence pair and run the model
inputs = tokenizer(sent1, sent2, return_tensors="pt")
outputs = model(**inputs)

logits = outputs.logits
predicted_class = torch.argmax(logits, dim=1).item()

print("Prediction:", model.config.id2label[predicted_class])
# Prediction: paraphrase
```
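
If probability scores are wanted alongside the label (similar to the pipeline output shown above), the logits can be passed through a softmax. A minimal sketch, continuing from the snippet above and reusing `outputs` and `model`:

```python
import torch

# Convert the raw logits into class probabilities for the single input pair
probs = torch.softmax(outputs.logits, dim=-1)[0]

for idx, score in enumerate(probs.tolist()):
    print(model.config.id2label[idx], round(score, 4))
```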

## Training and evaluation data

More information needed