
Commit

improved weed model
dreossi committed Jan 11, 2018
1 parent 9733568 commit 38e47d0
Showing 10 changed files with 58 additions and 150 deletions.
ai/classifier/checkpoint (4 changes: 2 additions & 2 deletions)
@@ -1,2 +1,2 @@
model_checkpoint_path: "pero-model"
all_model_checkpoint_paths: "pero-model"
model_checkpoint_path: "weed-model"
all_model_checkpoint_paths: "weed-model"
ai/classifier/dataset.py (7 changes: 4 additions & 3 deletions)
@@ -12,7 +12,7 @@ def load_train(train_path, image_size, classes):
     cls = []

     print('Going to read training images')
-    for fields in classes:
+    for fields in classes:
         index = classes.index(fields)
         print('Now going to read {} files (Index: {})'.format(fields, index))
         path = os.path.join(train_path, fields, '*g')
@@ -83,7 +83,6 @@ def next_batch(self, batch_size):
             self._epochs_done += 1
             start = 0
             self._index_in_epoch = batch_size
-
             assert batch_size <= self._num_examples
         end = self._index_in_epoch

@@ -96,7 +95,7 @@ class DataSets(object):
     data_sets = DataSets()

     images, labels, img_names, cls = load_train(train_path, image_size, classes)
-    images, labels, img_names, cls = shuffle(images, labels, img_names, cls)
+    images, labels, img_names, cls = shuffle(images, labels, img_names, cls)

     if isinstance(validation_size, float):
         validation_size = int(validation_size * images.shape[0])
@@ -115,3 +114,5 @@ class DataSets(object):
     data_sets.valid = DataSet(validation_images, validation_labels, validation_img_names, validation_cls)

     return data_sets
+
+
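For context, a short sketch (not part of the commit) of how train.py below consumes this module; the argument values mirror the ones train.py sets:

    import dataset

    classes = ['good', 'bad']
    data = dataset.read_train_sets('training_data', 128, classes, validation_size=0.2)
    print('Training examples:', data.train.num_examples)

    # next_batch returns (images, one-hot labels, file names, class names)
    # and, per the next_batch hunk above, wraps around once an epoch is done.
    x_batch, y_batch, names, cls = data.train.next_batch(32)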
ai/classifier/demo.py (39 changes: 0 additions & 39 deletions)

This file was deleted.

ai/classifier/model.py (57 changes: 0 additions & 57 deletions)

This file was deleted.

Binary file removed ai/classifier/pero-model.data-00000-of-00001
Binary file removed ai/classifier/pero-model.index
ai/classifier/train.py (101 changes: 52 additions & 49 deletions)
@@ -6,7 +6,7 @@
 import random
 import numpy as np

-# Adding seed so that random initialization is consistent
+#Adding Seed so that random initialization is consistent
 from numpy.random import seed
 seed(1)
 from tensorflow import set_random_seed
@@ -15,18 +15,17 @@

 batch_size = 32

-# Prepare input data
-classes = ['bad','good']
+#Prepare input data
+classes = ['good','bad']
 num_classes = len(classes)

-# 20% of the data will be used for validation
+# 20% of the data will automatically be used for validation
 validation_size = 0.2
 img_size = 128
 num_channels = 3
-train_path='./data/train/'
-check_point_name = './pero-model'
+train_path='training_data'

-# Load training and validation images and labels
+# We shall load all the training and validation images and labels into memory using openCV and use that during training
 data = dataset.read_train_sets(train_path, img_size, classes, validation_size=validation_size)


@@ -39,14 +38,13 @@
 session = tf.Session()
 x = tf.placeholder(tf.float32, shape=[None, img_size,img_size,num_channels], name='x')

-# Labels
+## labels
 y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')
 y_true_cls = tf.argmax(y_true, dimension=1)



-# Network graph params
-
+##Network graph params
 filter_size_conv1 = 3
 num_filters_conv1 = 32

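For context, a quick shape check for one conv block as defined in the next hunk: 'SAME' padding keeps the 128x128 spatial size through the 3x3 convolution, and each 2x2, stride-2 max-pool halves it. A standalone TF 1.x sketch:

    import tensorflow as tf

    x = tf.placeholder(tf.float32, shape=[None, 128, 128, 3])
    w = tf.Variable(tf.truncated_normal([3, 3, 3, 32], stddev=0.05))
    conv = tf.nn.conv2d(input=x, filter=w, strides=[1, 1, 1, 1], padding='SAME')
    pool = tf.nn.max_pool(value=conv, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    print(pool.get_shape())  # (?, 64, 64, 32): height/width halved, 32 filter maps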
@@ -61,66 +59,76 @@
 def create_weights(shape):
     return tf.Variable(tf.truncated_normal(shape, stddev=0.05))


 def create_biases(size):
     return tf.Variable(tf.constant(0.05, shape=[size]))


-def create_convolutional_layer(input, num_input_channels, conv_filter_size, num_filters):
-    '''Create a convolutional layer + max pool + relu activation'''
-
-    # Trainable weights and biases
+def create_convolutional_layer(input,
+                               num_input_channels,
+                               conv_filter_size,
+                               num_filters):
+
+    ## We shall define the weights that will be trained using create_weights function.
     weights = create_weights(shape=[conv_filter_size, conv_filter_size, num_input_channels, num_filters])
+    ## We create biases using the create_biases function. These are also trained.
     biases = create_biases(num_filters)

-    # Create the convolutional layer
-    layer = tf.nn.conv2d(input=input, filter=weights, strides=[1, 1, 1, 1], padding='SAME')
-    layer += biases
+    ## Creating the convolutional layer
+    layer = tf.nn.conv2d(input=input,
+                         filter=weights,
+                         strides=[1, 1, 1, 1],
+                         padding='SAME')
+
+    layer += biases

-    # Max-pooling.
-    layer = tf.nn.max_pool(value=layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
-
-    # Relu activation function
+    ## We shall be using max-pooling.
+    layer = tf.nn.max_pool(value=layer,
+                           ksize=[1, 2, 2, 1],
+                           strides=[1, 2, 2, 1],
+                           padding='SAME')
+    ## Output of pooling is fed to Relu which is the activation function for us.
     layer = tf.nn.relu(layer)

     return layer


-def create_flatten_layer(layer):
-    '''Flatten layer of dimension [batch_size img_size img_size num_channels] to single column tensor'''
-
+def create_flatten_layer(layer):
+    #We know that the shape of the layer will be [batch_size img_size img_size num_channels]
+    # But let's get it from the previous layer.
     layer_shape = layer.get_shape()

+    ## Number of features will be img_height * img_width* num_channels. But we shall calculate it in place of hard-coding it.
     num_features = layer_shape[1:4].num_elements()

-    # Flatten layer reshaped to num_features
+    ## Now, we Flatten the layer so we shall have to reshape to num_features
     layer = tf.reshape(layer, [-1, num_features])

     return layer


-def create_fc_layer(input, num_inputs, num_outputs, use_relu=True):
-    '''Create fully connected layer'''
+def create_fc_layer(input,
+                    num_inputs,
+                    num_outputs,
+                    use_relu=True):

-    #Trainable weights and biases
+    #Let's define trainable weights and biases.
     weights = create_weights(shape=[num_inputs, num_outputs])
     biases = create_biases(num_outputs)

-    # Fully connected layer takes input x and produces wx+b
+    # Fully connected layer takes input x and produces wx+b.Since, these are matrices, we use matmul function in Tensorflow
     layer = tf.matmul(input, weights) + biases
     if use_relu:
         layer = tf.nn.relu(layer)

     return layer



-# Netwok graph
-
 layer_conv1 = create_convolutional_layer(input=x,
                                          num_input_channels=num_channels,
                                          conv_filter_size=filter_size_conv1,
                                          num_filters=num_filters_conv1)

 layer_conv2 = create_convolutional_layer(input=layer_conv1,
                                          num_input_channels=num_filters_conv1,
                                          conv_filter_size=filter_size_conv2,
@@ -144,13 +152,11 @@ def create_fc_layer(input, num_inputs, num_outputs, use_relu=True):
                          use_relu=False)

 y_pred = tf.nn.softmax(layer_fc2,name='y_pred')
-y_pred_cls = tf.argmax(y_pred, dimension=1)
-
+y_pred_cls = tf.argmax(y_pred, dimension=1)
 session.run(tf.global_variables_initializer())

-
-# Training functions
-cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2, labels=y_true)
+cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,
+                                                        labels=y_true)
 cost = tf.reduce_mean(cross_entropy)
 optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)
 correct_prediction = tf.equal(y_pred_cls, y_true_cls)
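
Note that cross_entropy is computed from the raw layer_fc2 logits rather than from y_pred: softmax_cross_entropy_with_logits applies the softmax internally, which is numerically more stable than taking log(softmax(...)) by hand. A standalone sketch of the equivalence:

    import tensorflow as tf

    logits = tf.constant([[2.0, -1.0]])
    labels = tf.constant([[1.0, 0.0]])
    ce = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
    manual = -tf.reduce_sum(labels * tf.log(tf.nn.softmax(logits)), axis=1)
    with tf.Session() as s:
        print(s.run([ce, manual]))  # both approx. [0.0486]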
@@ -161,42 +167,39 @@ def create_fc_layer(input, num_inputs, num_outputs, use_relu=True):


 def show_progress(epoch, feed_dict_train, feed_dict_validate, val_loss):
-    '''Show progress while training'''
     acc = session.run(accuracy, feed_dict=feed_dict_train)
     val_acc = session.run(accuracy, feed_dict=feed_dict_validate)
     msg = "Training Epoch {0} --- Training Accuracy: {1:>6.1%}, Validation Accuracy: {2:>6.1%}, Validation Loss: {3:.3f}"
     print(msg.format(epoch + 1, acc, val_acc, val_loss))


 total_iterations = 0

 saver = tf.train.Saver()
 def train(num_iteration):
-    '''Training loop'''
-
     global total_iterations

-    for i in range(total_iterations, total_iterations + num_iteration):
+    for i in range(total_iterations,
+                   total_iterations + num_iteration):

-        # Fecth batch
-        x_batch, y_true_batch, _, _ = data.train.next_batch(batch_size)
-        x_valid_batch, y_valid_batch, _, _ = data.valid.next_batch(batch_size)
+        x_batch, y_true_batch, _, cls_batch = data.train.next_batch(batch_size)
+        x_valid_batch, y_valid_batch, _, valid_cls_batch = data.valid.next_batch(batch_size)


-        feed_dict_tr = {x: x_batch, y_true: y_true_batch}
-        feed_dict_val = {x: x_valid_batch, y_true: y_valid_batch}
+        feed_dict_tr = {x: x_batch,
+                        y_true: y_true_batch}
+        feed_dict_val = {x: x_valid_batch,
+                         y_true: y_valid_batch}

         session.run(optimizer, feed_dict=feed_dict_tr)

-        # Show progress and save learnt parameters
         if i % int(data.train.num_examples/batch_size) == 0:
             val_loss = session.run(cost, feed_dict=feed_dict_val)
             epoch = int(i / int(data.train.num_examples/batch_size))

             show_progress(epoch, feed_dict_tr, feed_dict_val, val_loss)
-            saver.save(session, check_point_name)
+            saver.save(session, 'weed-model')


     total_iterations += num_iteration

-train(num_iteration=3000)
+train(num_iteration=1000)
Binary file added ai/classifier/weed-model.data-00000-of-00001
Binary file added ai/classifier/weed-model.index
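For completeness, a hypothetical restore-and-predict sketch against the weed-model files added above (TF 1.x; it assumes saver.save also wrote a weed-model.meta graph file alongside the .data/.index shards, and that all of them sit in the working directory):

    import numpy as np
    import tensorflow as tf

    sess = tf.Session()
    saver = tf.train.import_meta_graph('weed-model.meta')
    saver.restore(sess, 'weed-model')

    graph = tf.get_default_graph()
    x = graph.get_tensor_by_name('x:0')
    y_pred = graph.get_tensor_by_name('y_pred:0')

    # Dummy 128x128 RGB input; a real caller would load and resize an image.
    image = np.zeros((1, 128, 128, 3), dtype=np.float32)
    print(sess.run(y_pred, feed_dict={x: image}))  # [[P(good), P(bad)]] per the classes order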
