Why is the accuracy of my model different on my confusion matrix than when I am training it?

I am using a dataset of images in 5 classes, with around 4,000 images in the training set and 2,000 in the test set.

from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
from keras import optimizers
from matplotlib import pyplot as plt
import numpy as np

import seaborn as sns

from sklearn.metrics import confusion_matrix



img_width, img_height = 512, 384

categories = ["cardboard", "glass", "metal", "paper", "plastic"]

train_data_dir = '/Users/lukasrois/ve/Train_Data'
test_data_dir = '/Users/lukasrois/ve/Test_Data'

classifier = Sequential()




if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)


classifier.add(Conv2D(64,(3,3),input_shape = (64,64,3), activation= 'relu'))
classifier.add(Dropout(.1))
classifier.add(MaxPooling2D(pool_size=(2,2)))



classifier.add(Conv2D(32,(3,3),input_shape = (32,32,3), activation= 'relu'))
classifier.add(Dropout(.1))
classifier.add(MaxPooling2D(pool_size=(2,2)))



classifier.add(Flatten())
classifier.add(Dense(1024, activation='relu'))
classifier.add(Dense(1024, activation='relu'))
classifier.add(Dense(5, activation='softmax'))
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])



train_datagen = ImageDataGenerator(
    rescale = 1./255,
    shear_range = 0.2,
    zoom_range = 0.2,
    horizontal_flip=True
)

test_datagen = ImageDataGenerator(rescale=1./255)

train_set = train_datagen.flow_from_directory(train_data_dir, target_size=(64,64),
                                              batch_size=10, class_mode='categorical', shuffle=True)

test_set = test_datagen.flow_from_directory(test_data_dir, target_size=(64,64),
                                              batch_size=10, class_mode='categorical', shuffle=True)


nb_train_samples = len(train_set)
nb_validation_samples = len(test_set)

train_labels = train_set.classes


hist = classifier.fit_generator(train_set, steps_per_epoch=None, epochs=50,
                                validation_data=test_set, shuffle=True)


plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()


y_pred = classifier.predict_generator(test_set)
y_pred = np.rint(y_pred)

y_true = test_set.classes

predict_class = np.argmax(y_pred, axis=1)
predict_class = predict_class.tolist()

print(confusion_matrix(y_true, predict_class))

sns.heatmap(confusion_matrix(y_true, predict_class), square=True, annot=True, cmap='Blues', fmt='d', cbar=False)

This is the output from the final epoch when training the model:

Epoch 50/50
426/426 [==============================] - 336s 788ms/step - loss: 0.0405 - acc: 0.9881 - val_loss: 0.5690 - val_acc: 0.8882

And my confusion matrix looks like this:

[[ 17  38  15  35  16]
 [ 80 280  80 173 143]
 [ 45 129  55  76  49]
 [ 54 187  56 121  76]
 [ 43 140  50  85  87]]

However, if I add up all the correct predictions on the confusion matrix (the diagonal) and divide that by the total of incorrect predictions, I get 560/1570 ≈ 0.36. So why is the accuracy different?
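As a sanity check on my arithmetic, this is how I understand accuracy is normally computed from a confusion matrix (diagonal sum over the total number of predictions), using the matrix above:

import numpy as np

cm = np.array([[ 17,  38,  15,  35,  16],
               [ 80, 280,  80, 173, 143],
               [ 45, 129,  55,  76,  49],
               [ 54, 187,  56, 121,  76],
               [ 43, 140,  50,  85,  87]])

# diagonal = correct predictions, cm.sum() = all predictions
print(np.trace(cm) / cm.sum())  # ~0.26, still nowhere near the 0.88 val_acc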

EDIT: I have changed the way I create the confusion matrix. I wrote my own function that does not rely on test_set, like this:


import os
import random

def config_confusion_matrix():
    actual_values = []
    predicted_values = []
    # sample 50 random test images, predict each one, and record
    # the true and predicted class indices
    for i in range(50):
        c = categories.index(random.choice(categories))
        r = categories[c]
        path = "/Users/lukasrois/ve/Test_Data/"+r+"/"
        random_filename = random.choice([x for x in os.listdir(path) if os.path.isfile(os.path.join(path, x))])
        new_path = "/Users/lukasrois/ve/Test_Data/"+r+"/"+random_filename
        result = cast_predict(new_path)
        predicted_values.append(result)
        actual_values.append(c)

    return (actual_values, predicted_values)
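
A minimal sketch of how the matrix below is then built from that function (assuming cast_predict, my separate helper that classifies a single image path, returns a class index in the same 0-4 order as categories):

from sklearn.metrics import confusion_matrix

# actual_values / predicted_values are plain lists of class indices (0-4),
# so they can be passed straight to sklearn's confusion_matrix
actual_values, predicted_values = config_confusion_matrix()
print(confusion_matrix(actual_values, predicted_values))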

Confusion matrix:

array([[ 6,  0,  0,  0,  4],
       [ 0,  0,  5,  0,  3],
       [ 0,  0,  8,  0,  0],
       [ 3,  1, 10,  0,  1],
       [ 0,  4,  5,  0,  0]])

Current accuracy:

>>> classifier.evaluate_generator(test_set)
[0.28701336261618293, 0.9285955914520505]
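
(For reference, evaluate_generator returns the loss followed by the metrics in classifier.metrics_names, so the second number above should be the accuracy. A quick way to confirm the ordering, assuming the old 'acc' metric naming in this Keras version:)

>>> classifier.metrics_names   # order of the values returned above, e.g. ['loss', 'acc']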

However, my confusion matrix still does not reflect the same accuracy as during training. Why?



Sources

This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.

Source: Stack Overflow
