How do you load the most recent training checkpoint (TensorFlow) so you can skip re-training upon execution?
I was wondering how to load the TensorFlow checkpoints saved while training my CNN model. I saved all the checkpoints to /com.docker.devenvironments.code/Checkpoints,
but I am unsure how to load them back so that the model does not have to retrain every time I execute the program.
I've attached my code below. Thank you!
import pandas as pd
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import keras
from sklearn.model_selection import train_test_split
from keras.layers import Conv2D, MaxPool2D, AveragePooling2D, Input, BatchNormalization, MaxPooling2D, Activation, Flatten, Dense, Dropout
from keras.models import Model
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import classification_report
from imblearn.over_sampling import RandomOverSampler
from keras.preprocessing import image
import scipy
import os
from random import randint
import gc
# Load the FER-2013 dataset. NOTE: the pixels column name has a leading
# space (' pixels') — that is how the CSV ships, not a typo.
data = pd.read_csv('/com.docker.devenvironments.code/fer2013/icml_face_data.csv')
# Space-separated pixel strings, one 48x48 grayscale image per row.
pixel_data = data[' pixels']
# Integer emotion labels (0-6, per label_dict defined further down).
label_data = data['emotion']
# NOTE(review): bare expression — a notebook leftover; has no effect in a script.
len(label_data)
def preprocess_pixels(pixel_data):
    """Convert space-separated pixel strings into a stacked image array.

    Parameters
    ----------
    pixel_data : sequence of str
        Each element encodes one 48x48 grayscale image as 2304
        space-separated integer pixel values.

    Returns
    -------
    np.ndarray
        Array of shape (n_samples, 48, 48, 1) with integer pixel values.
    """
    images = []
    for pixels in pixel_data:
        # np.fromstring(text, sep=' ') is deprecated; str.split + np.asarray
        # is the supported replacement and yields the same integer values.
        img = np.asarray(pixels.split(), dtype=int).reshape(48, 48, 1)
        images.append(img)
    return np.array(images)
# Balance the classes by randomly duplicating minority-emotion samples.
oversampler = RandomOverSampler(sampling_strategy='auto')
# fit_resample requires a 2-D feature array, so reshape the strings to (n, 1).
X_over, Y_over = oversampler.fit_resample(pixel_data.values.reshape(-1,1), label_data)
# Flatten back to a 1-D Series of pixel strings for preprocessing.
X_over_series = pd.Series(X_over.flatten())
X_over_series  # notebook leftover; has no effect in a script
# Decode pixel strings into an (n, 48, 48, 1) image array.
X = preprocess_pixels(X_over_series)
Y = Y_over
# Reshape labels to an (n, 1) column vector.
Y = Y_over.values.reshape(Y.shape[0],1)
Y.shape  # notebook leftover; has no effect in a script
# Hold out 10% of the (oversampled) data as a test set.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.1, random_state = 45)
print(X_train.shape, Y_train.shape)
# Visual sanity check of one sample image.
plt.imshow(X[10000,:,:,0])
def emotion_recognition(input_shape):
    """Build the CNN used for 7-class facial-emotion classification.

    Layer sequence: conv32(valid) -> conv64(same) -> pool -> conv64(valid)
    -> conv128(same) -> pool -> conv128(valid) -> pool -> dense200
    -> dropout(0.6) -> softmax(7).
    """
    def conv_bn_relu(tensor, filters, pad):
        # Shared Conv2D -> BatchNorm -> ReLU stage used throughout the net.
        tensor = Conv2D(filters, (3, 3), strides=(1, 1), padding=pad)(tensor)
        tensor = BatchNormalization(axis=3)(tensor)
        return Activation('relu')(tensor)

    inputs = Input(input_shape)
    net = conv_bn_relu(inputs, 32, 'valid')
    net = conv_bn_relu(net, 64, 'same')
    net = MaxPooling2D((2, 2))(net)
    net = conv_bn_relu(net, 64, 'valid')
    net = conv_bn_relu(net, 128, 'same')
    net = MaxPooling2D((2, 2))(net)
    net = conv_bn_relu(net, 128, 'valid')
    net = MaxPooling2D((2, 2))(net)
    net = Flatten()(net)
    net = Dense(200, activation='relu')(net)
    net = Dropout(0.6)(net)
    net = Dense(7, activation='softmax')(net)
    return Model(inputs=inputs, outputs=net)
# Build and compile the CNN for 48x48 single-channel inputs.
model = emotion_recognition((48,48,1))
optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()

# One-hot encode the integer labels for categorical cross-entropy.
y_train = to_categorical(Y_train, num_classes=7)
y_test = to_categorical(Y_test, num_classes=7)

# Class-index -> emotion-name mapping used when printing predictions.
label_dict = {0 : 'Angry', 1 : 'Disgust', 2 : 'Fear', 3 : 'Happiness', 4 : 'Sad', 5 : 'Surprise', 6 : 'Neutral'}

# Predictions on the training set (model is untrained at this point).
preds = model.predict(X_train)
def get_class(preds):
    """Map per-sample probability rows to predicted class indices.

    Parameters
    ----------
    preds : np.ndarray
        Array of shape (n_samples, n_classes) of class scores/probabilities.

    Returns
    -------
    np.ndarray
        Column vector of shape (n_samples, 1) holding each row's argmax.
    """
    # Vectorized argmax over the class axis replaces the per-row Python loop.
    return np.argmax(preds, axis=1).reshape(-1, 1)
pred_class_train = get_class(preds)

# --- Checkpointing: restore the newest saved weights if any exist,
# --- otherwise train from scratch (this answers the question asked above).
checkpoint_path = "/com.docker.devenvironments.code/Checkpoints/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Saves weights (not the whole model) after every epoch.
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, save_weights_only=True, verbose=1)

# tf.train.latest_checkpoint scans the directory for the most recent
# checkpoint written by ModelCheckpoint(save_weights_only=True).
latest = tf.train.latest_checkpoint(checkpoint_dir)
if latest is not None:
    # A checkpoint exists: restore the weights and skip retraining.
    model.load_weights(latest)
    print('Restored weights from ' + latest + '; skipping training.')
else:
    # No checkpoint found: train and save weights each epoch via the callback.
    model.fit(X_train, y_train, epochs = 30, validation_data=(X_test,y_test), callbacks=[cp_callback])

img_path = '/com.docker.devenvironments.code/fer2013/testimgs/neutral/PublicTest_98333211.jpg'
# Load the test image in grayscale at the model's expected 48x48 size.
img = image.load_img(img_path, grayscale=True, target_size=(48,48,1))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)  # add the batch dimension -> (1, 48, 48, 1)
prediction = np.argmax(model.predict(x))
print('The predicted emotion is : ' + label_dict[prediction])
my_image = image.load_img(img_path)
plt.imshow(my_image)
Sources
This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.
Source: Stack Overflow
| Solution | Source |
|---|---|
