c++ committed on
Commit 4e3ce18 · verified · 1 parent: f60ca25

Update Rock_Paper_Scissors_VGG16.py

Files changed (1)
  1. Rock_Paper_Scissors_VGG16.py +180 -186
Rock_Paper_Scissors_VGG16.py CHANGED
@@ -1,186 +1,180 @@
- import os
- import numpy as np
- from keras import applications, Sequential
- from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
- from keras.layers import Dense, Dropout, Flatten, BatchNormalization
- from keras.optimizers import Adam, SGD
- from keras.preprocessing.image import ImageDataGenerator
- from keras.regularizers import l2
- from matplotlib import pyplot as plt
- from sklearn.metrics import classification_report, confusion_matrix
-
- # Part 1
- # a) visualized samples from the dataset, i.e. rock, paper, scissors hand signs
- #    with the appropriate labels
- # b) summary of the model architecture in the form of a plot or text
- # c) model accuracy evaluation plot after the training concludes
- # d) model loss evaluation plot after the training concludes
-
-
- # Image directories, input dimensions, batch size, and number of epochs
- base_dir = '../rps'
- train_dir = os.path.join(base_dir, 'train')
- valid_dir = os.path.join(base_dir, 'validation')
- BATCH_SIZE = 32
- EPOCHS = 7
- img_width, img_height = 224, 224
- # L2 regularization coefficient to help prevent overfitting
- l2_reg = 0.00001
-
- # Candidate optimizers and learning rates (only `opt` is used below)
- opt = Adam(learning_rate=1e-4)
- opt1 = Adam(learning_rate=2e-4)
- opt2 = Adam(learning_rate=0.0001)
- opt3 = SGD(learning_rate=1e-4, momentum=0.99)
-
- # Prepare the training data and its augmentation pipeline
- train_datagen = ImageDataGenerator(
-     rescale=1.0 / 255,
-     rotation_range=90,
-     zoom_range=0.1,
-     width_shift_range=0.2,
-     height_shift_range=0.2,
-     shear_range=0.2,
-     # horizontal_flip=True,
-     vertical_flip=True,
-     brightness_range=(0.2, 1),
-     fill_mode='nearest',
-     validation_split=0.2)
-
- train_generator = train_datagen.flow_from_directory(
-     train_dir,
-     shuffle=True,
-     target_size=(img_width, img_height),
-     batch_size=BATCH_SIZE,
-     class_mode='categorical',
-     subset='training')
-
- # a) Visualize samples from the dataset
- class_names = ['paper', 'rock', 'scissors']
- images, labels = next(train_generator)
- plt.figure(figsize=(10, 10))
- for i in range(9):
-     plt.subplot(3, 3, i + 1)
-     label_index = np.argmax(labels[i])
-     plt.title('Label: ' + class_names[label_index])
-     plt.imshow(images[i])
-     plt.tight_layout()
-     plt.axis('off')
- plt.show()
-
- validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
- validation_generator = validation_datagen.flow_from_directory(
-     valid_dir,
-     target_size=(img_width, img_height),
-     batch_size=BATCH_SIZE,
-     class_mode='categorical',
-     shuffle=False)  # keep file order fixed so predictions align with validation_generator.classes
-
- # ------- Callbacks ------- #
- # Save the weights of the best model seen so far (lowest validation loss)
- checkpoint = ModelCheckpoint(
-     filepath='best_weights.hdf5',
-     monitor='val_loss',
-     verbose=1,
-     save_best_only=True,
-     mode='min',
-     save_weights_only=False
- )
- # Stop training early if validation loss stops improving
- early_stop = EarlyStopping(
-     monitor='val_loss',
-     min_delta=0.001,
-     patience=5,
-     verbose=1,
-     mode='auto'
- )
- # Reduce the learning rate when validation loss plateaus
- lr_reduction = ReduceLROnPlateau(
-     monitor='val_loss',
-     factor=0.2,
-     patience=2,
-     verbose=1,
-     mode='auto',
-     cooldown=1,
-     min_lr=0.000001
- )
- callbacks = [checkpoint, early_stop, lr_reduction]
-
- # Load the pre-trained VGG16 model without the top layer
- base_model = applications.VGG16(weights='imagenet', include_top=False, pooling='max',
-                                 input_shape=(img_width, img_height, 3))
-
- # Freeze the pre-trained layers from 0-14,
- # so they are not updated during training
- for layer in base_model.layers[:10]:
-     layer.trainable = False
- # b) summary of base model
- base_model.summary()
-
- # Add custom layers on top of VGG16
- model = Sequential()
- model.add(base_model)
- model.add(Flatten())
- model.add(Dense(512, activation='relu', kernel_regularizer=l2(l2_reg)))
- model.add(BatchNormalization())
- model.add(Dropout(0.3))
- model.add(Dense(3, activation='softmax', kernel_regularizer=l2(l2_reg)))
- # b) summary of model
- model.summary()
-
- # Compile the model
- model.compile(optimizer=opt,
-               loss='categorical_crossentropy',
-               metrics=['accuracy'])
-
- # Finally, train the model with the chosen settings
- history = model.fit(
-     train_generator,
-     epochs=EPOCHS,
-     callbacks=callbacks,
-     validation_data=validation_generator)
-
-
- # c) & d) Plot the model's training and validation accuracy and loss
- def eval_plot(history):
-     plt.figure(figsize=(14, 5))
-
-     # Accuracy plot
-     plt.subplot(1, 2, 1)
-     acc = history.history['accuracy']
-     val_acc = history.history['val_accuracy']
-     epochs = range(len(acc))
-     acc_plot, = plt.plot(epochs, acc, 'r')
-     val_acc_plot, = plt.plot(epochs, val_acc, 'b')
-     plt.title('Training and Validation Accuracy')
-     plt.legend([acc_plot, val_acc_plot], ['Training Accuracy', 'Validation Accuracy'])
-
-     # Loss plot
-     plt.subplot(1, 2, 2)
-     loss = history.history['loss']
-     val_loss = history.history['val_loss']
-     epochs = range(len(loss))
-     loss_plot, = plt.plot(epochs, loss, 'r')
-     val_loss_plot, = plt.plot(epochs, val_loss, 'b')
-     plt.title('Training and Validation Loss')
-     plt.legend([loss_plot, val_loss_plot], ['Training Loss', 'Validation Loss'])
-     plt.tight_layout()
-     plt.show()
-
-
- # Evaluate the trained model on the validation set
- def evaluate(model):
-     num_of_test_samples = len(validation_generator.filenames)
-
-     # predict over the whole validation set; steps must be passed as a keyword argument
-     y_pred = model.predict(validation_generator, steps=num_of_test_samples // BATCH_SIZE + 1)
-     y_pred = np.argmax(y_pred, axis=1)
-     print('\nConfusion Matrix\n')
-     print(confusion_matrix(validation_generator.classes, y_pred))
-     print('\n\nClassification Report\n')
-     target_names = ['Paper', 'Rock', 'Scissors']
-     print(classification_report(validation_generator.classes, y_pred, target_names=target_names))
-
-
- eval_plot(history)
- evaluate(model)
- model.save('../Rock_Paper_Scissors_VGG16/RPS_Model.hdf5')
 
+ import os
+ import numpy as np
+ from keras import applications, Sequential
+ from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
+ from keras.layers import Dense, Dropout, Flatten, BatchNormalization
+ from keras.optimizers import Adam, SGD
+ from keras.preprocessing.image import ImageDataGenerator
+ from keras.regularizers import l2
+ from matplotlib import pyplot as plt
+ from sklearn.metrics import classification_report, confusion_matrix
+
+
+
+ # Image directories, input dimensions, batch size, and number of epochs
+ base_dir = '../rps'
+ train_dir = os.path.join(base_dir, 'train')
+ valid_dir = os.path.join(base_dir, 'validation')
+ BATCH_SIZE = 32
+ EPOCHS = 7
+ img_width, img_height = 224, 224
+ # L2 regularization coefficient to help prevent overfitting
+ l2_reg = 0.00001
+
+ # Candidate optimizers and learning rates (only `opt` is used below)
+ opt = Adam(learning_rate=1e-4)
+ opt1 = Adam(learning_rate=2e-4)
+ opt2 = Adam(learning_rate=0.0001)
+ opt3 = SGD(learning_rate=1e-4, momentum=0.99)
+
+ # Prepare the training data and its augmentation pipeline
+ train_datagen = ImageDataGenerator(
+     rescale=1.0 / 255,
+     rotation_range=90,
+     zoom_range=0.1,
+     width_shift_range=0.2,
+     height_shift_range=0.2,
+     shear_range=0.2,
+     # horizontal_flip=True,
+     vertical_flip=True,
+     brightness_range=(0.2, 1),
+     fill_mode='nearest',
+     validation_split=0.2)
+
+ train_generator = train_datagen.flow_from_directory(
+     train_dir,
+     shuffle=True,
+     target_size=(img_width, img_height),
+     batch_size=BATCH_SIZE,
+     class_mode='categorical',
+     subset='training')
+
+ # a) Visualize samples from the dataset
+ class_names = ['paper', 'rock', 'scissors']
+ images, labels = next(train_generator)
+ plt.figure(figsize=(10, 10))
+ for i in range(9):
+     plt.subplot(3, 3, i + 1)
+     label_index = np.argmax(labels[i])
+     plt.title('Label: ' + class_names[label_index])
+     plt.imshow(images[i])
+     plt.tight_layout()
+     plt.axis('off')
+ plt.show()
+
+ validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
+ validation_generator = validation_datagen.flow_from_directory(
+     valid_dir,
+     target_size=(img_width, img_height),
+     batch_size=BATCH_SIZE,
+     class_mode='categorical',
+     shuffle=False)  # keep file order fixed so predictions align with validation_generator.classes
+
+ # ------- Callbacks ------- #
+ # Save the weights of the best model seen so far (lowest validation loss)
+ checkpoint = ModelCheckpoint(
+     filepath='best_weights.hdf5',
+     monitor='val_loss',
+     verbose=1,
+     save_best_only=True,
+     mode='min',
+     save_weights_only=False
+ )
+ # Stop training early if validation loss stops improving
+ early_stop = EarlyStopping(
+     monitor='val_loss',
+     min_delta=0.001,
+     patience=5,
+     verbose=1,
+     mode='auto'
+ )
+ # Reduce the learning rate when validation loss plateaus
+ lr_reduction = ReduceLROnPlateau(
+     monitor='val_loss',
+     factor=0.2,
+     patience=2,
+     verbose=1,
+     mode='auto',
+     cooldown=1,
+     min_lr=0.000001
+ )
+ callbacks = [checkpoint, early_stop, lr_reduction]
+
+ # Load the pre-trained VGG16 model without the top layer
+ base_model = applications.VGG16(weights='imagenet', include_top=False, pooling='max',
+                                 input_shape=(img_width, img_height, 3))
+
+ # Freeze the first 10 pre-trained layers (indices 0-9)
+ # so they are not updated during training
+ for layer in base_model.layers[:10]:
+     layer.trainable = False
+ # b) summary of base model
+ base_model.summary()
+
+ # Add custom layers on top of VGG16
+ model = Sequential()
+ model.add(base_model)
+ model.add(Flatten())
+ model.add(Dense(512, activation='relu', kernel_regularizer=l2(l2_reg)))
+ model.add(BatchNormalization())
+ model.add(Dropout(0.3))
+ model.add(Dense(3, activation='softmax', kernel_regularizer=l2(l2_reg)))
+ # b) summary of model
+ model.summary()
+
+ # Compile the model
+ model.compile(optimizer=opt,
+               loss='categorical_crossentropy',
+               metrics=['accuracy'])
+
+ # Finally, train the model with the chosen settings
+ history = model.fit(
+     train_generator,
+     epochs=EPOCHS,
+     callbacks=callbacks,
+     validation_data=validation_generator)
+
+
+ # Plot the model's training and validation accuracy and loss
+ def eval_plot(history):
+     plt.figure(figsize=(14, 5))
+
+     # Accuracy plot
+     plt.subplot(1, 2, 1)
+     acc = history.history['accuracy']
+     val_acc = history.history['val_accuracy']
+     epochs = range(len(acc))
+     acc_plot, = plt.plot(epochs, acc, 'r')
+     val_acc_plot, = plt.plot(epochs, val_acc, 'b')
+     plt.title('Training and Validation Accuracy')
+     plt.legend([acc_plot, val_acc_plot], ['Training Accuracy', 'Validation Accuracy'])
+
+     # Loss plot
+     plt.subplot(1, 2, 2)
+     loss = history.history['loss']
+     val_loss = history.history['val_loss']
+     epochs = range(len(loss))
+     loss_plot, = plt.plot(epochs, loss, 'r')
+     val_loss_plot, = plt.plot(epochs, val_loss, 'b')
+     plt.title('Training and Validation Loss')
+     plt.legend([loss_plot, val_loss_plot], ['Training Loss', 'Validation Loss'])
+     plt.tight_layout()
+     plt.show()
+
+
+ # Evaluate the trained model on the validation set
+ def evaluate(model):
+     num_of_test_samples = len(validation_generator.filenames)
+
+     # predict over the whole validation set; steps must be passed as a keyword argument
+     y_pred = model.predict(validation_generator, steps=num_of_test_samples // BATCH_SIZE + 1)
+     y_pred = np.argmax(y_pred, axis=1)
+     print('\nConfusion Matrix\n')
+     print(confusion_matrix(validation_generator.classes, y_pred))
+     print('\n\nClassification Report\n')
+     target_names = ['Paper', 'Rock', 'Scissors']
+     print(classification_report(validation_generator.classes, y_pred, target_names=target_names))
+
+
+ eval_plot(history)
+ evaluate(model)
+ model.save('../Rock_Paper_Scissors_VGG16/RPS_Model.hdf5')