train_model.py
# import required modules
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
import matplotlib.pyplot as plt
# import created model
from net import Net
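# Net.build (defined in net.py in this repo) is expected to return an
# uncompiled Keras model for the given input dimensions; compilation
# happens further down in this script.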
# Dimensions of our images
img_width, img_height = 32, 32
# 3-channel (RGB) images
no_of_channels = 3
# training data directory
train_data_dir = 'train/'
# validation data directory (the 'test/' folder is used for validation here)
validation_data_dir = 'test/'
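# Both directories must contain one subdirectory per class;
# flow_from_directory below infers the labels from those folder names.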
epochs = 80
batch_size = 32
# initialize the model
model = Net.build(width=img_width, height=img_height, depth=no_of_channels)
print('building done')
# Compile model
rms = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
print('optimizer created')
model.compile(loss='categorical_crossentropy',
              optimizer=rms,
              metrics=['accuracy'])
print('compiling done')
# this is the augmentation configuration used for training;
# horizontal_flip=False, since mirrored characters would no longer be valid
train_datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rescale=1. / 255,
    shear_range=0.1,
    zoom_range=0.1,
    rotation_range=5,
    width_shift_range=0.05,
    height_shift_range=0.05,
    horizontal_flip=False)
# the configuration used for validation: no augmentation, only rescaling and
# the same featurewise normalization as the training generator
test_datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rescale=1. / 255)
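# Note: featurewise_center / featurewise_std_normalization compute their
# statistics in ImageDataGenerator.fit(); if fit() is never called, Keras
# only warns and skips that normalization. A minimal sketch, assuming a
# NumPy array `x_sample` of training images is available (hypothetical name):
#
#   train_datagen.fit(x_sample)
#   test_datagen.fit(x_sample)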
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')
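# train_generator.class_indices holds the mapping from class folder name to
# label index, which is useful when decoding predictions later.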
# fit the model
history = model.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // batch_size)
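# (in newer tf.keras versions fit_generator is deprecated and model.fit()
# accepts generators directly)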
# evaluate on the validation dataset and report the result
score = model.evaluate_generator(validation_generator)
print('validation loss and accuracy:', score)
# save weights in a file
model.save_weights('trained_weights.h5')
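# To reuse the weights later, rebuild the same architecture and load them
# back, e.g. (sketch, using the same Net.build call as above):
#
#   model = Net.build(width=img_width, height=img_height, depth=no_of_channels)
#   model.load_weights('trained_weights.h5')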
print(history.history)
# Loss curves
plt.figure(figsize=[8, 6])
plt.plot(history.history['loss'], 'r', linewidth=3.0)
plt.plot(history.history['val_loss'], 'b', linewidth=3.0)
plt.legend(['Training Loss', 'Validation Loss'], fontsize=18)
plt.xlabel('Epochs', fontsize=16)
plt.ylabel('Loss', fontsize=16)
plt.title('Loss Curves', fontsize=16)
# Accuracy curves
# (with standalone Keras the history keys are 'acc'/'val_acc'; newer tf.keras
# versions use 'accuracy'/'val_accuracy' instead)
plt.figure(figsize=[8, 6])
plt.plot(history.history['acc'], 'r', linewidth=3.0)
plt.plot(history.history['val_acc'], 'b', linewidth=3.0)
plt.legend(['Training Accuracy', 'Validation Accuracy'], fontsize=18)
plt.xlabel('Epochs', fontsize=16)
plt.ylabel('Accuracy', fontsize=16)
plt.title('Accuracy Curves', fontsize=16)
plt.show()