# model.py — behavioral-cloning training script (NVIDIA architecture).
# NOTE(review): this span previously held scraped GitHub UI chrome and a
# bare line-number gutter from a web copy-paste; replaced with this header.
import csv
import numpy as np
from sklearn.model_selection import train_test_split
from keras.models import Sequential, load_model
from keras.layers import Flatten, Dense, Conv2D, Dropout, Lambda, Cropping2D
from keras.callbacks import EarlyStopping
import os
from PIL import Image
# --- Run-mode flags and hyperparameters ---

# Silence TensorFlow's GPU/info logging so the console output stays readable.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Toggles: train a fresh model and/or evaluate the saved one.
runModel = False
runTest = False

# Offset added to (left) / subtracted from (right) the center steering angle
# so side-camera frames are labeled as if seen from the center camera.
steerCorrect = 0.2

# EarlyStopping patience: epochs tolerated without any val_loss improvement.
validationStop = 2
# Load every row of the simulator's driving log into memory at once.
with open('Training Data/driving_log.csv') as csvfile:
    lines = list(csv.reader(csvfile))
# Expand each log row into three (image, steering-angle) samples: the center
# camera, plus the left/right cameras with a corrected steering label.
images = []
steeringAng = []
# Root folder of the recorded data — loop-invariant, so hoisted out of the loop.
direct = 'Training Data/'
for line in lines:
    # Column 3 of the driving log holds the center-camera steering angle.
    center = float(line[3])
    # Shift the label so a side camera "sees" the road like the center one.
    steerLeft = center + steerCorrect
    steerRight = center - steerCorrect
    # Load the three camera frames as numpy arrays. strip() (rather than
    # strip(" ")) also removes tabs/newlines around the logged paths.
    imgCenter = np.asarray(Image.open(direct + line[0]))
    imgLeft = np.asarray(Image.open(direct + line[1].strip()))
    imgRight = np.asarray(Image.open(direct + line[2].strip()))
    # Append the triplet to the two parallel master lists.
    images.append(imgCenter)
    images.append(imgLeft)
    images.append(imgRight)
    steeringAng.append(center)
    steeringAng.append(steerLeft)
    steeringAng.append(steerRight)
# Pack the parallel sample lists into arrays Keras can consume.
X_train = np.array(images)
Y_train = np.array(steeringAng)

# Hold out 20% of the data as a fixed test set (seeded for reproducibility).
X_train, X_test, Y_train, Y_test = train_test_split(
    X_train, Y_train, test_size=0.2, random_state=1)

# Stop training once val_loss has stalled for `validationStop` epochs.
early_stop = EarlyStopping(monitor='val_loss', patience=validationStop)

# Report split sizes; fit() later carves 20% of X_train off for validation,
# so the train/validation counts shown here are that 80/20 estimate.
n_train = len(X_train)
n_test = len(X_test)
print("Number of training examples: ", round(n_train * 0.8))
print("Number of validation examples: ", round(n_train * 0.2))
print("Number of testing examples: ", n_test)
# Original image shape (160,320,3)
if runModel:
    # NVIDIA end-to-end driving architecture, with dropout between the fully
    # connected layers to reduce overfitting.
    model = Sequential()
    # Normalize pixel values to [-0.5, 0.5].
    model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(160, 320, 3)))
    # Crop away the sky (top 70 rows) and the car hood (bottom 25 rows).
    model.add(Cropping2D(cropping=((70, 25), (0, 0))))
    # Keras 2 API: kernel size as a tuple and `strides=` — the original's
    # positional width/height plus `subsample=` is Keras-1-only and raises
    # a TypeError on Keras 2.2+.
    model.add(Conv2D(24, (5, 5), strides=(2, 2), activation="relu"))
    model.add(Conv2D(36, (5, 5), strides=(2, 2), activation="relu"))
    model.add(Conv2D(48, (5, 5), strides=(2, 2), activation="relu"))
    model.add(Conv2D(64, (3, 3), activation="relu"))
    model.add(Conv2D(64, (3, 3), activation="relu"))
    model.add(Flatten())
    model.add(Dense(100))
    model.add(Dropout(0.25))
    model.add(Dense(50))
    model.add(Dropout(0.25))
    model.add(Dense(10))
    model.add(Dropout(0.25))
    model.add(Dense(1))
    # MSE loss suits the continuous steering-angle regression target.
    model.compile(loss='mse', optimizer='adam')
    # `epochs=` replaces the deprecated Keras 1 `nb_epoch=` keyword.
    model.fit(X_train, Y_train, validation_split=0.2, shuffle=True,
              epochs=10, verbose=2, callbacks=[early_stop])
    # Persist the trained network for drive.py / later evaluation.
    model.save('model.h5')
    print("Saved Model")
# Curiosity: score the persisted model on the held-out test set.
if runTest:
    saved = load_model('model.h5')
    test_loss = saved.evaluate(X_test, Y_test)
    print(test_loss*100)