kar0lina commited on
Commit
b1a5fe5
Β·
1 Parent(s): aa27dd5

Initial commit

Browse files
Files changed (4) hide show
  1. .gitignore +2 -0
  2. README.md +5 -5
  3. app.py +108 -0
  4. requirements.txt +3 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Virtual environments
2
+ venv
README.md CHANGED
@@ -1,13 +1,13 @@
1
  ---
2
- title: Petarda
3
- emoji: 🌍
4
- colorFrom: gray
5
- colorTo: red
6
  sdk: gradio
7
  sdk_version: 5.9.1
8
  app_file: app.py
9
  pinned: false
10
- short_description: PErsonality Trait prediction - AI model Roberta - Demo App
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Petarda Classification-neu
3
+ emoji: 🀯🀯🀯🀯🀯
4
+ colorFrom: yellow
5
+ colorTo: blue
6
  sdk: gradio
7
  sdk_version: 5.9.1
8
  app_file: app.py
9
  pinned: false
10
+ short_description: Classifying Personality Traits using RoBERTa - demo app
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # 1. Import the required packages
3
+ import torch
4
+ import gradio as gr
5
+
6
+ from typing import Dict
7
+ from transformers import pipeline
8
+
9
# 2. Define function to use our model on given text
def trait_classifier(text: str) -> Dict[str, float]:
    """Predict Big Five personality trait scores for *text*.

    Runs one binary text-classification model per trait (all hosted on the
    Hugging Face Hub) and collects the probability of the POSITIVE class
    for each trait.

    Args:
        text: The input text to classify.

    Returns:
        Mapping of trait display name -> POSITIVE-class probability,
        in the format Gradio's ``Label`` output expects
        (e.g. ``{"label_1": probability_1, "label_2": probability_2}``).
    """
    # Trait display name -> model-name suffix on the HF Hub.
    # BUG FIX: the original labels were misspelled ("Agreeablenes",
    # "Consiousness"); the Big Five "con" trait is Conscientiousness.
    traits = {
        "Agreeableness": "agr",
        "Conscientiousness": "con",
        "Extraversion": "ext",
        "Neuroticism": "neu",
        "Openness": "ope",
    }

    # Hoisted out of the loop: the device choice is loop-invariant.
    device = "cuda" if torch.cuda.is_available() else "cpu"

    output_dict: Dict[str, float] = {}
    for trait_name, suffix in traits.items():
        # Because our models are on Hugging Face already, we can pass the
        # model id directly.  top_k=None returns scores for ALL labels,
        # not just the top-1 prediction.
        classifier = pipeline(
            task="text-classification",
            model=f"kar0lina/petarda_xlm-roberta-base_{suffix}",
            device=device,
            top_k=None,
        )

        # Pipeline output for one input: a list of {"label": ..., "score": ...}.
        outputs = classifier(text)[0]
        print(f"outputs_{suffix}: ", outputs)

        # Report the POSITIVE-class probability regardless of the order the
        # pipeline returns the labels in (top_k=None sorts by score, so the
        # POSITIVE entry may be at index 0 or 1).
        output_dict[trait_name] = next(
            o["score"] for o in outputs if o["label"] == "POSITIVE"
        )

    return output_dict
90
# 3. Create a Gradio interface with details about our app
description = """
A text classifier for Personality Trait prediction using the AI model RoBERTa - Demo App.

Fine-tuned from [xlm-roberta-base](https://huggingface.co/FacebookAI/xlm-roberta-base)"""

# BUG FIX: the theme was previously passed as the literal string
# "gr.themes.Ocean((primary_hue='amber', secondary_hue='cyan')" — note the
# unbalanced parentheses — which Gradio cannot resolve; pass a constructed
# theme object instead.
# BUG FIX: num_top_classes was 2, but trait_classifier returns five trait
# scores, so three of them were silently hidden.
demo = gr.Interface(
    fn=trait_classifier,
    inputs="text",
    outputs=gr.Label(num_top_classes=5),  # one entry per Big Five trait
    title="🀯 Petarda",
    theme=gr.themes.Ocean(primary_hue="amber", secondary_hue="cyan"),
    description=description,
    examples=[
        ["I have a real problem right now... I am struggling a lot :("],
        ["He's such a nice, gentle man and it was great"],
    ],
)

# 4. Launch the interface
if __name__ == "__main__":
    demo.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ gradio
2
+ torch
3
+ transformers