Upload app.py
app.py CHANGED
@@ -18,7 +18,7 @@ user_input = st.text_area("Your text here:")
 if st.button("Predict"):
     if user_input:
         # Tokenize input text
-        inputs = tokenizer(user_input, return_tensors="pt"
+        inputs = tokenizer(user_input, return_tensors="pt")
 
         # Get predictions from the model
         with torch.no_grad():
@@ -26,30 +26,22 @@ if st.button("Predict"):
 
         # Extract the predictions
         predictions = outputs.logits.squeeze()
-
+
         # Convert to numpy array if necessary
         predicted_scores = predictions.numpy()
+        #predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
+        #predictions = predictions[0].tolist()
 
-        #
-
-        adjusted_scores = predicted_scores * reduction_factor
-
-        # Ensure scores do not go below zero
-        adjusted_scores = np.maximum(adjusted_scores, 0)
-
-        # Normalize the scores to ensure they fall within the 0-9 range
-        normalized_scores = (adjusted_scores / adjusted_scores.max()) * 9  # Scale to 9
-
-        # Apply additional reductions to all scores
-        additional_reduction = 1.9  # Further reduce all scores
-        normalized_scores = np.maximum(normalized_scores - additional_reduction, 0)
-
-        # Round the scores
-        rounded_scores = np.round(normalized_scores * 2) / 2
+        # Convert predictions to a NumPy array for the calculations
+        #predictions_np = np.array(predictions)
 
+        # Scale the predictions
+        scaled_scores = 2.25 * predicted_scores - 1.25
+        rounded_scores = [round(score * 2) / 2 for score in scaled_scores]  # Round to nearest 0.5
+
         # Display the predictions
         labels = ["Task Achievement", "Coherence and Cohesion", "Vocabulary", "Grammar", "Overall"]
         for label, score in zip(labels, rounded_scores):
-            st.write(f"{label}: {score
+            st.write(f"{label}: {score:}")
     else:
-        st.write("Please enter some text to get scores.")
+        st.write("Please enter some text to get scores.")
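For reference, below is a minimal, self-contained sketch of the prediction block as it stands after this commit. The checkpoint name, the AutoTokenizer/AutoModelForSequenceClassification loading code, and the page title are assumptions added for illustration (the real values live earlier in app.py, outside this diff); only the tokenize, predict, scale, round, and display steps are taken from the changed lines above.

# Minimal sketch of app.py after this commit; checkpoint name and model setup are assumptions.
import streamlit as st
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

MODEL_NAME = "your-username/essay-scoring-model"  # hypothetical checkpoint with 5 regression outputs

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)

st.title("Essay Scoring")  # assumed title
user_input = st.text_area("Your text here:")

if st.button("Predict"):
    if user_input:
        # Tokenize input text
        inputs = tokenizer(user_input, return_tensors="pt")

        # Get predictions from the model without tracking gradients
        with torch.no_grad():
            outputs = model(**inputs)

        # Extract the raw outputs and convert them to a NumPy array
        predicted_scores = outputs.logits.squeeze().numpy()

        # Rescale the raw outputs and round each score to the nearest 0.5
        scaled_scores = 2.25 * predicted_scores - 1.25
        rounded_scores = [round(float(score) * 2) / 2 for score in scaled_scores]

        # Display one score per rubric criterion
        labels = ["Task Achievement", "Coherence and Cohesion", "Vocabulary", "Grammar", "Overall"]
        for label, score in zip(labels, rounded_scores):
            st.write(f"{label}: {score}")
    else:
        st.write("Please enter some text to get scores.")

Compared with the removed logic, which normalized each essay against its own highest criterion (adjusted_scores / adjusted_scores.max() * 9) and then subtracted a flat 1.9, the new affine transform treats every criterion independently: a raw output of 3.4, for example, becomes 2.25 * 3.4 - 1.25 = 6.4, which rounds to 6.5.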