aashituli committed
Commit 8538457 · verified · 1 Parent(s): c50caee

Create train

Files changed (1): train +139 -0
train ADDED
@@ -0,0 +1,139 @@
+import os
+import pandas as pd
+import torch
+import numpy as np
+from PIL import Image
+from sklearn.metrics import classification_report, confusion_matrix
+import matplotlib.pyplot as plt
+import seaborn as sns
+from torchvision import transforms
+from transformers import (
+    ViTFeatureExtractor,
+    ViTForImageClassification,
+    Trainer,
+    TrainingArguments,
+    EarlyStoppingCallback,
+    default_data_collator
+)
+from datasets import load_dataset, Dataset, DatasetDict
+from huggingface_hub import HfApi
+
+# ============ CONFIG ============ #
+MODEL_NAME = "wambugu71/crop_leaf_diseases_vit"
+CSV_PATH = "dataset/labels.csv"
+IMAGE_DIR = "dataset/images"
+OUTPUT_DIR = "./vit_leaf_disease_model"
+NUM_EPOCHS = 10
+BATCH_SIZE = 16
+LEARNING_RATE = 2e-5
+SEED = 42
+
+# Set random seed for reproducibility
+torch.manual_seed(SEED)
+np.random.seed(SEED)
+
+# ============ LOAD DATA ============ #
+df = pd.read_csv(CSV_PATH)
+labels = sorted(df['label'].unique())
+label2id = {label: i for i, label in enumerate(labels)}
+id2label = {i: label for label, i in label2id.items()}
+df['label_id'] = df['label'].map(label2id)
+
+# ============ FEATURE EXTRACTOR & MODEL ============ #
+feature_extractor = ViTFeatureExtractor.from_pretrained(MODEL_NAME)
+model = ViTForImageClassification.from_pretrained(
+    MODEL_NAME,
+    num_labels=len(labels),
+    label2id=label2id,
+    id2label=id2label
+)
+
+# ============ IMAGE TRANSFORM ============ #
+def preprocess(example):
+    image_path = os.path.join(IMAGE_DIR, example['image'])
+    image = Image.open(image_path).convert("RGB")
+    inputs = feature_extractor(images=image, return_tensors="pt")
+    example['pixel_values'] = inputs['pixel_values'][0]
+    example['label'] = example['label_id']
+    return example
+
+# Convert to HF dataset
+dataset = Dataset.from_pandas(df)
+dataset = dataset.map(preprocess, remove_columns=['image', 'label', 'label_id'])
+dataset = dataset.train_test_split(test_size=0.2, seed=SEED)
+train_ds = dataset['train']
+eval_ds = dataset['test']
+
+# ============ METRICS ============ #
+from evaluate import load
+accuracy = load("accuracy")
+
+def compute_metrics(eval_pred):
+    logits, labels = eval_pred
+    predictions = np.argmax(logits, axis=-1)
+    return accuracy.compute(predictions=predictions, references=labels)
+
+# ============ TRAINING ARGS ============ #
+training_args = TrainingArguments(
+    output_dir=OUTPUT_DIR,
+    per_device_train_batch_size=BATCH_SIZE,
+    per_device_eval_batch_size=BATCH_SIZE,
+    num_train_epochs=NUM_EPOCHS,
+    evaluation_strategy="epoch",
+    save_strategy="epoch",
+    learning_rate=LEARNING_RATE,
+    logging_dir="./logs",
+    logging_steps=10,
+    save_total_limit=2,
+    load_best_model_at_end=True,
+    metric_for_best_model="accuracy",
+    greater_is_better=True,
+    seed=SEED,
+    report_to="none"
+)
+
+# ============ TRAINER ============ #
+trainer = Trainer(
+    model=model,
+    args=training_args,
+    train_dataset=train_ds,
+    eval_dataset=eval_ds,
+    tokenizer=feature_extractor,
+    data_collator=default_data_collator,
+    compute_metrics=compute_metrics,
+    callbacks=[EarlyStoppingCallback(early_stopping_patience=3)]
+)
+
+# ============ TRAIN ============ #
+trainer.train()
+
+# ============ SAVE MODEL ============ #
+model.save_pretrained(OUTPUT_DIR)
+feature_extractor.save_pretrained(OUTPUT_DIR)
+
+# ============ EVALUATE ============ #
+outputs = trainer.predict(eval_ds)
+preds = np.argmax(outputs.predictions, axis=-1)
+true_labels = outputs.label_ids
+
+print("\nClassification Report:\n")
+print(classification_report(true_labels, preds, target_names=labels))
+
+# ============ CONFUSION MATRIX ============ #
+cm = confusion_matrix(true_labels, preds)
+plt.figure(figsize=(10, 8))
+sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=labels, yticklabels=labels)
+plt.xlabel("Predicted")
+plt.ylabel("True")
+plt.title("Confusion Matrix")
+plt.tight_layout()
+plt.savefig("confusion_matrix.png")
+plt.show()
+
+# ============ OPTIONAL: UPLOAD TO HF HUB ============ #
+# api = HfApi()
+# api.upload_folder(
+#     folder_path=OUTPUT_DIR,
+#     repo_id="your-username/crop_leaf_disease_vit_finetuned",
+#     repo_type="model"
+# )