Update README.md
README.md
CHANGED
@@ -4,6 +4,7 @@ license: mit
 base_model: deepset/gbert-large
 tags:
 - generated_from_trainer
+- german
 metrics:
 - accuracy
 - f1
@@ -12,6 +13,11 @@ metrics:
 model-index:
 - name: german-zeroshot
   results: []
+datasets:
+- facebook/xnli
+language:
+- de
+pipeline_tag: zero-shot-classification
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -26,6 +32,50 @@ It achieves the following results on the evaluation set:
 - F1: 0.8487
 - Precision: 0.8505
 - Recall: 0.8486
+
+## Usage
+
+```python
+# Use a pipeline as a high-level helper
+from transformers import pipeline
+import torch
+
+pipe = pipeline(
+    "zero-shot-classification",
+    model="kaixkhazaki/german-zeroshot",
+    tokenizer="kaixkhazaki/german-zeroshot",
+    device=0 if torch.cuda.is_available() else -1  # Use GPU if available
+)
+
+# Example 1: enter your text and the candidate classification labels
+sequence = "Können Sie mir die Schritte zur Konfiguration eines VPN auf einem Linux-Server erklären?"
+candidate_labels = [
+    "Technische Dokumentation",
+    "IT-Support",
+    "Netzwerkadministration",
+    "Linux-Konfiguration",
+    "VPN-Setup"
+]
+pipe(sequence, candidate_labels)
+>>
+{'sequence': 'Können Sie mir die Schritte zur Konfiguration eines VPN auf einem Linux-Server erklären?',
+ 'labels': ['VPN-Setup', 'Linux-Konfiguration', 'Netzwerkadministration', 'IT-Support', 'Technische Dokumentation'],
+ 'scores': [0.3245040476322174, 0.32373329997062683, 0.16423103213310242, 0.09850211441516876, 0.08902951329946518]}
+
+# Example 2
+sequence = "Wie lautet die Garantiezeit für dieses Produkt?"
+candidate_labels = [
+    "Garantiebedingungen",
+    "Produktdetails",
+    "Reklamation",
+    "Kundendienst",
+    "Kaufberatung"
+]
+pipe(sequence, candidate_labels)
+>>
+{'sequence': 'Wie lautet die Garantiezeit für dieses Produkt?',
+ 'labels': ['Garantiebedingungen', 'Produktdetails', 'Reklamation', 'Kundendienst', 'Kaufberatung'],
+ 'scores': [0.4313304126262665, 0.2905466556549072, 0.10058070719242096, 0.09384352713823318, 0.08369863778352737]}
+
+```
 
 ## Model description
 
@@ -82,4 +132,4 @@ The following hyperparameters were used during training:
 - Transformers 4.48.0.dev0
 - Pytorch 2.4.1+cu121
 - Datasets 3.1.0
-- Tokenizers 0.21.0
+- Tokenizers 0.21.0
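The usage section added above calls the model through the `zero-shot-classification` pipeline. As a complementary illustration, here is a minimal sketch of the same classification done directly with the underlying NLI model. It is not taken from the model card: the German hypothesis template, the lookup of an `entailment` entry in `config.id2label`, and the per-label softmax are illustrative assumptions, and the resulting scores will not exactly match the pipeline output, which renormalizes the entailment scores across all candidate labels when `multi_label=False`.

```python
# Sketch: manual zero-shot classification via NLI entailment scores.
# Assumptions (not from the model card): the model is an XNLI-style NLI
# classifier whose config.id2label contains an "entailment" label, and the
# hypothesis template below is an illustrative choice.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_name = "kaixkhazaki/german-zeroshot"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()

sequence = "Wie lautet die Garantiezeit für dieses Produkt?"
candidate_labels = ["Garantiebedingungen", "Produktdetails", "Reklamation"]
hypothesis_template = "Dieses Beispiel ist {}."  # illustrative template

# Find the index of the entailment class from the model config (assumed name).
entailment_id = [i for i, name in model.config.id2label.items()
                 if name.lower() == "entailment"][0]

scores = []
for label in candidate_labels:
    # Score each candidate label as a premise/hypothesis pair.
    inputs = tokenizer(sequence, hypothesis_template.format(label),
                       return_tensors="pt", truncation=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    # Probability of "entailment" among the NLI classes for this hypothesis.
    scores.append(torch.softmax(logits, dim=-1)[0, entailment_id].item())

for label, score in sorted(zip(candidate_labels, scores), key=lambda x: -x[1]):
    print(f"{label}: {score:.4f}")
```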