model.py
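
# Behavioral-cloning training script: learns to predict a steering angle from
# center/left/right driving-log camera images with a small convolutional
# network (Keras 1.x API) and saves the trained model to model.h5.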
import os
import csv
import numpy as np
import cv2
from keras.models import Sequential
from keras.layers import Cropping2D
from keras.layers.core import Flatten, Dense, Activation, Lambda, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import Adam
import sklearn
from sklearn.model_selection import train_test_split

samples = []
with open('../data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    next(reader, None)  # Skip the header row
    for line in reader:
        samples.append(line)
train_samples, validation_samples = train_test_split(samples, test_size=0.2)  # Hold out 20% of rows for validation

# Scale factor to shrink the 160x320 camera frames; crop margins scale with it
scaling = 0.3
height = int(scaling * 160)
width = int(scaling * 320)
crop_top = int(50 * scaling)     # rows of sky to crop from the top
crop_bottom = int(20 * scaling)  # rows of car hood to crop from the bottom
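# With scaling = 0.3 this gives 48x96 inputs; the Cropping2D layer below then
# removes 15 rows from the top and 6 from the bottom, leaving 27x96 frames.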

# Generator that yields batches of images on demand so the whole dataset
# never has to be held in memory
def generator(samples, batch_size=32):
    num_samples = len(samples)
    while True:  # Loop forever so the generator never terminates
        # sklearn's shuffle returns a copy rather than shuffling in place,
        # so the result must be reassigned for the order to actually change
        samples = sklearn.utils.shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset + batch_size]
            images = []
            angles = []
            for batch_sample in batch_samples:
                path = '../data/'
                # Read in the center, left, and right camera images
                center_image = cv2.imread(path + batch_sample[0])
                left_image = cv2.imread(path + batch_sample[1].lstrip())
                right_image = cv2.imread(path + batch_sample[2].lstrip())
                # Shrink the images by the scaling factor specified above
                center_image = cv2.resize(center_image, (width, height), interpolation=cv2.INTER_AREA)
                left_image = cv2.resize(left_image, (width, height), interpolation=cv2.INTER_AREA)
                right_image = cv2.resize(right_image, (width, height), interpolation=cv2.INTER_AREA)
                # Convert to RGB (cv2.imread loads images as BGR)
                center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)
                left_image = cv2.cvtColor(left_image, cv2.COLOR_BGR2RGB)
                right_image = cv2.cvtColor(right_image, cv2.COLOR_BGR2RGB)
                # Steering angles: offset the side-camera angles so those
                # frames teach the model to steer back toward the center
                center_angle = float(batch_sample[3])
                left_angle = center_angle + 0.25
                right_angle = center_angle - 0.25
                # Randomly mirror half the samples left-to-right and negate
                # the angles to balance left and right turns
                if np.random.uniform() < 0.5:
                    center_image = cv2.flip(center_image, 1)
                    left_image = cv2.flip(left_image, 1)
                    right_image = cv2.flip(right_image, 1)
                    center_angle = -1 * center_angle
                    left_angle = -1 * left_angle
                    right_angle = -1 * right_angle
                # Append the images and angles to their respective lists
                images.append(center_image)
                images.append(left_image)
                images.append(right_image)
                angles.append(center_angle)
                angles.append(left_angle)
                angles.append(right_angle)
            X_train = np.array(images)
            y_train = np.array(angles)
            yield sklearn.utils.shuffle(X_train, y_train)
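
# NOTE: each CSV row contributes three images (center, left, right), so every
# batch the generator yields actually contains 3 * batch_size frames; the
# sample counts passed to fit_generator below account for this factor of 3.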
# Initialize the generators
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)
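# Both generators loop forever; fit_generator below defines an epoch by the
# sample counts it is given, not by generator exhaustion.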

model = Sequential()
# Crop the (scaled) sky and hood rows off the top and bottom of each frame
model.add(Cropping2D(cropping=((crop_top, crop_bottom), (0, 0)), input_shape=(height, width, 3)))
# Preprocess incoming data: center around zero with a small range
model.add(Lambda(lambda x: x / 255.0 - 0.5))  # Normalization
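# Because cropping and normalization live inside the model, they are saved
# with it and applied automatically at inference time; callers only need to
# resize frames and convert them to RGB first.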

# LeNet-inspired architecture: two small convolutions followed by
# fully connected layers, with dropout for regularization
model.add(Convolution2D(6, 2, 2, activation='tanh'))  # 6 filters of 2x2
model.add(Convolution2D(6, 2, 2, activation='tanh'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(100))
model.add(Activation('tanh'))
model.add(Dropout(0.2))
model.add(Dense(50))
model.add(Activation('tanh'))
model.add(Dropout(0.2))
model.add(Dense(1))
model.add(Activation('tanh'))  # tanh keeps the predicted angle in (-1, 1)

# Train with mean squared error as the loss function and the Adam optimizer
model.compile(loss='mse', optimizer=Adam(lr=5e-4))
model.fit_generator(train_generator,
                    samples_per_epoch=3 * len(train_samples),
                    validation_data=validation_generator,
                    nb_val_samples=3 * len(validation_samples),
                    nb_epoch=7)
model.save('model.h5')
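
# A minimal sketch (not part of training) of how the saved model could be
# loaded for inference, assuming a hypothetical `frame` arrives as a BGR
# array like cv2.imread produces; the resize and RGB conversion must match
# the generator above, while cropping and normalization happen in the model:
#
#   from keras.models import load_model
#   net = load_model('model.h5')
#   small = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)
#   rgb = cv2.cvtColor(small, cv2.COLOR_BGR2RGB)
#   angle = net.predict(rgb[None, ...])[0][0]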