'HIGH Resolution Image and Colab PRO Crash

I have 1700 images of 1000*1000 Image height and Width. There are minor details in it, so I prefer to keep this size. Now, my google colab pro crashes. Please Help.

''' #@title IMAGE TO DATA, NORMALIZATION AND AUGMENTATION #Directories with Subdirectories as Classes for training and validation datasets %%capture train_dir = '/content/Dataset/Training' validation_dir = '/content/Dataset/Validation'

# Set batch size and Image Height and Width
batch_size = 32
IMG_HEIGHT, IMG_WIDTH = (1000,1000)


#Image to Data Transform using ImageDataGenerator of Keras

#Image to Data for Training Data
Dataset_Image_Training = ImageDataGenerator(rescale = 1./255, zoom_range=[0.8, 1.5], brightness_range= [0.8, 2.0])
train_data_gen =  Dataset_Image_Training.flow_from_directory(
                    batch_size= batch_size,
                    directory=train_dir,
                    shuffle=True,
                    target_size=(IMG_HEIGHT,IMG_WIDTH),
                    class_mode='binary')
#Image to Data for Validation Data
validation_image_generator = ImageDataGenerator(rescale=1./255, zoom_range=[0.8, 1.5], brightness_range= [0.8, 2.0])
val_data_gen = validation_image_generator.flow_from_directory(
                 batch_size=batch_size,
                 directory= validation_dir,
                 shuffle=True,
                 target_size=(IMG_HEIGHT,IMG_WIDTH),
                 class_mode= 'binary')
#Check Classes in Dataset
train_data_gen.class_indices

#@title Deep Learning CNN Model with Keras Seqential with **Dropout**
#%%capture
model = Sequential([
    Conv2D(32, (3,3), padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
    MaxPool2D(2,2),
    Dropout(0.5),
    Conv2D(64, (3,3), padding='same', activation='relu'),
    MaxPool2D(2,2),
    Dropout(0.5),
    Conv2D(128, (3,3), padding='same', activation='relu'),
    MaxPool2D(2,2),
    Dropout(0.5),
    Conv2D(256, (3,3), padding='same', activation='relu'),
    MaxPool2D(2,2),
    Dropout(0.5),
    Flatten(),
    Dense(512, activation='relu'),
    Dropout(0.5),
    Dense(1, activation='sigmoid')])

# Model Compilation
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

#Tensorboard Set up
import tensorflow as tf
import datetime
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)

#Checkpoint and earlystop setting
filepath = '/content/drive/My Drive/DL_Model.hdf5'
checkpoint = [tf.keras.callbacks.ModelCheckpoint(filepath, monitor='val_accuracy', mode='max', save_best_only=True, Save_weights_only = False, verbose = 1), 
              tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience = 15, verbose =1), [tensorboard_callback]]

#Model Fitting
hist = model.fit(
    train_data_gen,
    steps_per_epoch=None,
    epochs=500,
    validation_data=val_data_gen,
    validation_steps=None,
    callbacks = [checkpoint]
)

#Accuracy Print

train_acc = max(hist.history['accuracy'])
val_acc = max(hist.history['val_accuracy'])
train_loss = min(hist.history['loss'])
val_loss = min(hist.history['val_loss'])
print('Training accuracy is')
print(train_acc)
print('Validation accuracy is')
print(val_acc)
print('Training loss is')
print(train_loss)
print('Validation loss is')
print(val_loss)

#Load Tensorboard
%load_ext tensorboard
%tensorboard --logdir logs

'''



Sources

This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.

Source: Stack Overflow

Solution Source