Save and export a model when using .flow_from_directory()
I'm using the code below in SageMaker in Script Mode. It works fine, but I don't know how to save and export the model to S3. I have googled a few ways, but none work — or I'm adding the code in the wrong place.
I've looked at the example from https://guillaumegenthial.github.io/serving-tensorflow-estimator.html#exporting-the-estimator-as-a-tfsaved_model:
def serving_input_receiver_fn():
    """Build the serving-time input receiver for the exported SavedModel."""
    # Placeholder fed by the client at serving time; the exported signature
    # exposes it under the key 'number'.
    number = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='number')
    # The model's features are derived from the raw receiver tensor.
    features = tf.tile(number, multiples=[1, 2])
    return tf.estimator.export.ServingInputReceiver(features, {'number': number})


estimator = tf.estimator.Estimator(model_fn, 'model', params={})
estimator.export_saved_model('saved_model', serving_input_receiver_fn)
Any suggestions? Am I looking at the correct method, or am I just not implementing it properly?
# https://sagemaker-workshop.com/custom/algo.html
import tensorflow as tf
import argparse, os
from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from keras import backend as K
if __name__ == '__main__':
    # Hyperparameters / environment passed in by the SageMaker training job.
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu-count', type=int, default=os.environ['SM_NUM_GPUS'])
    # SM_MODEL_DIR is the local path whose contents SageMaker uploads to S3
    # when the training job finishes — save the model under this directory.
    parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
    parser.add_argument('--epochs', type=int, default=1)
    # BUG FIX: batch_size must be an int — flow_from_directory() cannot take a
    # float batch size (was type=float). Default 63 kept as-is, though 64 was
    # probably intended — confirm.
    parser.add_argument('--batch_size', type=int, default=63)
    parser.add_argument('--img_height', type=int, default=540)
    parser.add_argument('--img_width', type=int, default=960)
    parser.add_argument('--num_class', type=int, default=2)
    args, _ = parser.parse_known_args()

    model_dir = args.model_dir
    EPOCHS = args.epochs
    HEIGHT = args.img_height
    WIDTH = args.img_width
    NUM_CLASSES = args.num_class
    batch_size = args.batch_size
    DEPTH = 3  # channels; presumably RGB input — TODO confirm
def keras_model_fn():
    """Build and compile the CNN classifier (logits output, sparse int labels)."""
    model = tf.keras.Sequential([
        # The first layer is explicitly named "inputs".
        Conv2D(32, kernel_size=(3, 3), input_shape=(HEIGHT, WIDTH, DEPTH),
               activation="relu", name="inputs", padding="same"),
        Conv2D(32, kernel_size=(3, 3), activation="relu", padding="same"),
        MaxPooling2D(),
        Conv2D(64, kernel_size=(3, 3), activation="relu", padding="same"),
        Conv2D(64, kernel_size=(3, 3), activation="relu", padding="same"),
        MaxPooling2D(),
        Conv2D(128, kernel_size=(3, 3), activation="relu", padding="same"),
        Conv2D(128, kernel_size=(3, 3), activation="relu", padding="same"),
        MaxPooling2D(),
        Dropout(0.4),
        Flatten(),
        Dense(256, activation="relu"),
        # No softmax here: the loss below is built with from_logits=True.
        Dense(NUM_CLASSES),
    ])
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    return model
def serving_input_receiver_fn(hyperparameters):
    """Serving input fn: receive a raw image batch and hand it to the model.

    BUG FIX: INPUT_TENSOR_NAME was never defined anywhere in this script, so
    this function raised NameError at export time. The model's first layer is
    named "inputs", so the same key is used here — confirm against the
    deployed signature.

    NOTE(review): tf.placeholder exists only in TF 1.x graph mode; under TF 2
    this must be tf.compat.v1.placeholder — confirm the container's TF version.
    """
    input_name = 'inputs'
    tensor = tf.placeholder(tf.float32, shape=[None, HEIGHT, WIDTH, DEPTH])
    inputs = {input_name: tensor}
    # Features and receiver tensors are the same dict: no preprocessing here.
    return tf.estimator.export.ServingInputReceiver(inputs, inputs)
def train_input_fn(training_dir, hyperparameters):
    """Training input fn for the estimator.

    BUG FIX: BATCH_SIZE was undefined (NameError at first call) — the parsed
    hyperparameter is stored in lowercase ``batch_size``.
    """
    return _input(tf.estimator.ModeKeys.TRAIN, batch_size=batch_size,
                  data_dir=training_dir, epochs=EPOCHS)
def eval_input_fn(training_dir, hyperparameters):
    """Evaluation input fn for the estimator.

    BUG FIX: BATCH_SIZE was undefined (NameError at first call) — the parsed
    hyperparameter is stored in lowercase ``batch_size``.
    """
    return _input(tf.estimator.ModeKeys.EVAL, batch_size=batch_size,
                  data_dir=training_dir, epochs=EPOCHS)
def _input(mode, batch_size, data_dir, epochs):
    """Return one (features, labels) batch read via flow_from_directory.

    Training data gets light augmentation; eval data is only rescaled.
    The ``epochs`` parameter is kept for interface compatibility but is not
    passed to the generator (see BUG FIX below).
    """
    if mode == tf.estimator.ModeKeys.TRAIN:
        # Augment only the training split.
        datagen = ImageDataGenerator(
            rescale=1. / 255,
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=True,
        )
    else:
        datagen = ImageDataGenerator(rescale=1. / 255)

    # BUG FIX: flow_from_directory() has no ``epochs`` argument — the original
    # call raised TypeError; the generator loops over the data indefinitely on
    # its own.
    # BUG FIX: class_mode='sparse' yields integer labels, matching the
    # SparseCategoricalCrossentropy loss compiled into the model (the default
    # 'categorical' mode yields one-hot vectors, which that loss rejects).
    generator = datagen.flow_from_directory(data_dir,
                                            target_size=(HEIGHT, WIDTH),
                                            batch_size=batch_size,
                                            class_mode='sparse')
    # BUG FIX: INPUT_TENSOR_NAME was undefined (NameError) — key the features
    # dict by the model's input layer name instead. NOTE(review): only a
    # single batch is drawn per call here, as in the workshop example.
    images, labels = generator.next()
    return {'inputs': images}, labels
Sources
This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.
Source: Stack Overflow
| Solution | Source |
|---|---|
