ecqvln committed (verified)
Commit e14bef0 · 1 Parent(s): 2e0fcae

Upload 6 files
Files changed (6)
  1. Train-1.tsv +0 -0
  2. Train-2.tsv +0 -0
  3. Train-3.tsv +0 -0
  4. implementation1.py +84 -0
  5. results.md +6 -0
  6. train.py +27 -0
Train-1.tsv ADDED
The diff for this file is too large to render. See raw diff
 
Train-2.tsv ADDED
The diff for this file is too large to render. See raw diff
 
Train-3.tsv ADDED
The diff for this file is too large to render. See raw diff
 
implementation1.py ADDED
@@ -0,0 +1,84 @@
+ import pandas as pd
+ import matplotlib.pyplot as plt
+ import csv
+ from sklearn.model_selection import train_test_split, GridSearchCV
+ from sklearn.svm import SVC
+ from sklearn.neighbors import KNeighborsClassifier
+ from sklearn.feature_extraction.text import TfidfVectorizer
+ from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, confusion_matrix
+
+ file_path = 'Test-3.tsv'
+ data = pd.read_csv(file_path, sep="\t", names=["Sentence", "Label"], skiprows=1, quoting=csv.QUOTE_NONE, encoding="utf-8")
+ data.columns = data.columns.str.strip()
+
+ data = data.dropna(subset=['Sentence', 'Label'])
+ data['Sentence'] = data['Sentence'].astype(str)
+
+ X = data['Sentence']
+ y = data['Label']
+
+ vectorizer = TfidfVectorizer(ngram_range=(1, 2), max_features=5000)
+ X_tfidf = vectorizer.fit_transform(X)
+
+ X_train, X_test, y_train, y_test = train_test_split(
+     X_tfidf, y, test_size=0.3, random_state=42, stratify=y)
+
+ plt.figure(figsize=(8, 6))
+ y.value_counts().sort_index().plot(kind='bar', color='skyblue')
+ plt.title('Class Distribution')
+ plt.xlabel('Class')
+ plt.ylabel('Frequency')
+ plt.xticks(rotation=0)
+ plt.tight_layout()
+ plt.savefig('class_distribution.png')
+ plt.close()
+
+ svm_model = SVC(kernel='rbf', degree=3, random_state=42, class_weight='balanced')
+ svm_model.fit(X_train, y_train)
+ svm_predictions = svm_model.predict(X_test)
+
+ print("SVM Model Performance:")
+ print(f"Precision: {precision_score(y_test, svm_predictions, average='weighted', zero_division=0):.4f}")
+ print(f"Recall: {recall_score(y_test, svm_predictions, average='weighted', zero_division=0):.4f}")
+ print(f"F1-Score: {f1_score(y_test, svm_predictions, average='weighted', zero_division=0):.4f}")
+ print(f"Accuracy: {accuracy_score(y_test, svm_predictions):.4f}")
+
+ param_grid = {'n_neighbors': list(range(3, 21, 2))}
+ knn = KNeighborsClassifier()
+ grid_search = GridSearchCV(knn, param_grid, cv=5, scoring='accuracy')
+ grid_search.fit(X_train, y_train)
+
+ best_knn = grid_search.best_estimator_
+ knn_predictions = best_knn.predict(X_test)
+
+ print("\nKNN Model Performance:")
+ print(f"Best k: {grid_search.best_params_['n_neighbors']}")
+ print(f"Precision: {precision_score(y_test, knn_predictions, average='weighted', zero_division=0):.4f}")
+ print(f"Recall: {recall_score(y_test, knn_predictions, average='weighted', zero_division=0):.4f}")
+ print(f"F1-Score: {f1_score(y_test, knn_predictions, average='weighted', zero_division=0):.4f}")
+ print(f"Accuracy: {accuracy_score(y_test, knn_predictions):.4f}")
+
+ def plot_conf_matrix(cm, title, filename):
+     plt.figure(figsize=(6, 5))
+     plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
+     plt.title(title)
+     plt.colorbar()
+     tick_marks = range(len(cm))
+     plt.xticks(tick_marks, tick_marks)
+     plt.yticks(tick_marks, tick_marks)
+     plt.xlabel('Predicted Label')
+     plt.ylabel('True Label')
+
+     thresh = cm.max() / 2.
+     for i in range(cm.shape[0]):
+         for j in range(cm.shape[1]):
+             plt.text(j, i, str(cm[i, j]),
+                      ha='center', va='center',
+                      color='white' if cm[i, j] > thresh else 'black')
+
+     plt.tight_layout()
+     plt.savefig(filename)
+     plt.close()
+
+ plot_conf_matrix(confusion_matrix(y_test, svm_predictions), 'SVM Confusion Matrix', 'svm_conf_matrix.png')
+ plot_conf_matrix(confusion_matrix(y_test, knn_predictions), 'KNN Confusion Matrix', 'knn_conf_matrix.png')
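
The script above loads a single TSV, draws a class-distribution bar chart, and reports weighted precision, recall, F1 and accuracy for an RBF-kernel SVM and a grid-searched KNN on a stratified 70/30 split. The numbers collected in results.md below come from running this pipeline against several data files; a minimal sketch of how that loop could look is given here. It is not part of the commit: the file names other than Test-3.tsv and the evaluate_file helper are assumptions for illustration, and only the SVM metrics are shown for brevity.

import csv

import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC


def evaluate_file(path):
    # Same loading and preprocessing steps as implementation1.py.
    data = pd.read_csv(path, sep="\t", names=["Sentence", "Label"], skiprows=1,
                       quoting=csv.QUOTE_NONE, encoding="utf-8")
    data = data.dropna(subset=["Sentence", "Label"])
    # TF-IDF features over unigrams and bigrams, capped at 5000 terms.
    X = TfidfVectorizer(ngram_range=(1, 2), max_features=5000).fit_transform(
        data["Sentence"].astype(str))
    X_tr, X_te, y_tr, y_te = train_test_split(
        X, data["Label"], test_size=0.3, random_state=42, stratify=data["Label"])
    model = SVC(kernel="rbf", random_state=42, class_weight="balanced").fit(X_tr, y_tr)
    pred = model.predict(X_te)
    return accuracy_score(y_te, pred), f1_score(y_te, pred, average="weighted")


# Hypothetical file list; only Test-3.tsv appears in this commit.
for path in ["Test-1.tsv", "Test-2.tsv", "Test-3.tsv"]:
    acc, f1 = evaluate_file(path)
    print(f"{path}: accuracy={acc:.4f}, weighted F1={f1:.4f}")

Factoring the pipeline into a function keeps the TF-IDF vocabulary fitted per file, matching the behaviour of implementation1.py, which rebuilds the vectorizer for each data file it is pointed at.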
results.md ADDED
@@ -0,0 +1,6 @@
+ | # | Method | Algorithm | Train set | Test 1: group 1 (precision, recall, F1, accuracy) | Test 2: group 2 (same metrics) | Test 3: group 3 (same metrics) |
+ | --- | --- | --- | --- | --- | --- | --- |
+ | 1.a.i | Machine learning (2 methods) | SVM | Train 1 / 2 / 3 (respective own train set) | Precision: 0.6301, Recall: 0.6684, F1-Score: 0.5532, Accuracy: 0.6684 | Precision: 0.5402, Recall: 0.6099, F1-Score: 0.4935, Accuracy: 0.6099 | Precision: 0.4200, Recall: 0.6481, F1-Score: 0.5097, Accuracy: 0.6481 |
+ | 1.a.ii | | SVM | TRAIN (combined) | Precision: 0.6119, Recall: 0.6782, F1-Score: 0.6182, Accuracy: 0.6782 | Precision: 0.5699, Recall: 0.6222, F1-Score: 0.5624, Accuracy: 0.6222 | |
+ | 1.b.i | | K-Nearest Neighbors (KNN) | Train 1 / 2 / 3 (respective own train set) | Precision: 0.5941, Recall: 0.6684, F1-Score: 0.5544, Accuracy: 0.6684 | Precision: 0.4766, Recall: 0.5964, F1-Score: 0.4870, Accuracy: 0.5964 | Precision: 0.5098, Recall: 0.6567, F1-Score: 0.5366, Accuracy: 0.6567 |
+ | 1.b.ii | | KNN | TRAIN (combined) | Precision: 0.5066, Recall: 0.6398, F1-Score: 0.5233, Accuracy: 0.6398 | Precision: 0.5641, Recall: 0.6117, F1-Score: 0.5479, Accuracy: 0.6117 | |
train.py ADDED
@@ -0,0 +1,27 @@
+ import csv
+
+ filenames = ['Train-1.tsv', 'Train-2.tsv', 'Train-3.tsv']
+
+ TRAIN = set()
+
+ for filename in filenames:
+     with open(filename, 'r', encoding='utf-8') as file:
+         tsv_reader = csv.reader(file, delimiter='\t')
+         header = next(tsv_reader)
+         try:
+             sentence_idx = header.index("Sentence")
+             label_idx = header.index("Label")
+         except ValueError:
+             raise ValueError(f"Required columns not found in {filename}")
+
+         for row in tsv_reader:
+             if len(row) > max(sentence_idx, label_idx):
+                 TRAIN.add((row[sentence_idx], row[label_idx]))
+
+ with open('Combined_Train.tsv', 'w', encoding='utf-8', newline='') as outfile:
+     tsv_writer = csv.writer(outfile, delimiter='\t')
+     tsv_writer.writerow(["Sentence", "Label"])
+     for row in TRAIN:
+         tsv_writer.writerow(row)
+
+ print(f"Combined TRAIN set written to 'Combined_Train.tsv' with {len(TRAIN)} unique rows.")