InvalidArgumentError: ConcatOp in model.predict - still getting the error when using a multiple of 32
I am trying to build a text similarity mapper using a neural network and the Google Universal Sentence Encoder (USE). Training the model works perfectly fine, but model.predict throws an error. I have tried breaking down the concatenation part, but there still seems to be some issue. Below is the code I am using:
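First, the custom DenseTranspose layer used in the decoders below. Mine is essentially the standard tied-weights implementation; a minimal sketch:

import tensorflow as tf

class DenseTranspose(tf.keras.layers.Layer):
    """Dense layer whose kernel is the transpose of another Dense layer's kernel."""
    def __init__(self, dense, activation=None, **kwargs):
        super().__init__(**kwargs)
        self.dense = dense  # the encoder layer whose weights are tied
        self.activation = tf.keras.activations.get(activation)

    def build(self, batch_input_shape):
        # one bias per output unit (= the tied layer's input size)
        self.biases = self.add_weight(name="bias",
                                      shape=[self.dense.input_shape[-1]],
                                      initializer="zeros")
        super().build(batch_input_shape)

    def call(self, inputs):
        # reuse the encoder kernel, transposed
        z = tf.matmul(inputs, self.dense.weights[0], transpose_b=True)
        return self.activation(z + self.biases)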
import numpy as np
import tensorflow as tf

# MODEL THAT CONDENSES THE USE EMBEDDINGS
# Tied autoencoder over the 512-dim USE embedding of the names
input1 = tf.keras.layers.Input(shape=(512,), dtype=tf.float32)
dense_1 = tf.keras.layers.Dense(256, activation="selu")
dense_2 = tf.keras.layers.Dense(128, activation="selu")
# tied encoder
l_flatten = tf.keras.layers.Flatten()(input1)
l_en1 = dense_1(l_flatten)
l_en2 = dense_2(l_en1)
# normalized bottleneck
l_en2 = tf.keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1))(l_en2)
# tied decoder (DenseTranspose is the custom layer shown above)
l_dc1 = DenseTranspose(dense_2, activation="selu")(l_en2)
l_dc2 = DenseTranspose(dense_1, activation="linear")(l_dc1)

# Second tied autoencoder over the 512-dim USE embedding of the descriptions
input2 = tf.keras.layers.Input(shape=(512,), dtype=tf.float32)
dense1 = tf.keras.layers.Dense(256, activation="selu")
dense2 = tf.keras.layers.Dense(128, activation="selu")
# tied encoder
lflatten = tf.keras.layers.Flatten()(input2)
len1 = dense1(lflatten)
len2 = dense2(len1)
# normalized bottleneck
len2 = tf.keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1))(len2)
# tied decoder
ldc1 = DenseTranspose(dense2, activation="selu")(len2)
ldc2 = DenseTranspose(dense1, activation="linear")(ldc1)

# String inputs for the product codes
# (`unmapped` is a pandas DataFrame defined earlier in the notebook)
input3gtin = tf.keras.layers.Input(shape=(1,), dtype=tf.string)
input4cupc = tf.keras.layers.Input(shape=(1,), dtype=tf.string)
#input5mpc = tf.keras.layers.Input(shape=(1,), dtype=tf.string)
Vectorize_layer1 = tf.keras.layers.TextVectorization(standardize=None, output_mode="int")
Vectorize_layer1.adapt(np.vstack(unmapped['GTIN']))
Vectorize_layer2 = tf.keras.layers.TextVectorization(standardize=None, output_mode="int")
Vectorize_layer2.adapt(np.vstack(unmapped['CUPC']))
#Vectorize_layer3 = tf.keras.layers.TextVectorization(standardize=None, output_mode="int")
#Vectorize_layer3.adapt(np.vstack(unmapped['O_MPC']))
v1 = Vectorize_layer1(input3gtin)
v2 = Vectorize_layer2(input4cupc)
#v3 = Vectorize_layer3(input5mpc)

# Embed the vectorized codes
# (`infodf` is another DataFrame defined earlier; its length is used as the vocabulary size)
embeddingG = tf.keras.layers.Embedding(input_dim=len(infodf), output_dim=128)
embeddingC = tf.keras.layers.Embedding(input_dim=len(infodf), output_dim=128)
#embeddingM = tf.keras.layers.Embedding(input_dim=len(infodf), output_dim=128)
embG = embeddingG(v1)
embC = embeddingC(v2)
#embM = embeddingM(v3)

# I first tried Flatten() here, then switched to average pooling
# to collapse the sequence dimension:
#flatten = tf.keras.layers.Flatten()
#flG = flatten(embG)
#flC = flatten(embC)
#flM = flatten(embM)
reshape = tf.keras.layers.GlobalAveragePooling1D()
flG = reshape(embG)
flC = reshape(embC)

# Concatenate the two decoder outputs and the two pooled code embeddings
x = tf.keras.layers.Concatenate(axis=1)([l_dc2, ldc2])
y = tf.keras.layers.Concatenate(axis=1)([flG, flC])
z = tf.keras.layers.Concatenate(axis=1)([x, y])
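For context, the model is then assembled from these inputs and called roughly like this (a sketch - the real model also has the training head and loss, and in the run that produced the traceback below the commented-out MPC branch was enabled as well, hence the five input arrays):

model = tf.keras.Model(inputs=[input1, input2, input3gtin, input4cupc],
                       outputs=z)
y_pred = model.predict([embeddings_names, embeddings_descriptions,
                        np.vstack(unmapped['GTIN']),
                        np.vstack(unmapped['CUPC'])])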
This is the error I am getting:
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
/tmp/ipykernel_20983/227920613.py in <module>
----> 1 y_pred = model.predict([embeddings_names, embeddings_descriptions , np.vstack(unmapped['GTIN']), \
2 np.vstack(unmapped['CUPC']), np.vstack(unmapped['O_MPC'])])
~/anaconda3/lib/python3.9/site-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
65 except Exception as e: # pylint: disable=broad-except
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
69 del filtered_tb
~/anaconda3/lib/python3.9/site-packages/tensorflow/python/framework/ops.py in raise_from_not_ok_status(e, name)
7184 def raise_from_not_ok_status(e, name):
7185 e.message += (" name: " + name if name is not None else "")
-> 7186 raise core._status_to_exception(e) from None # pylint: disable=protected-access
7187
7188
InvalidArgumentError: ConcatOp : Dimension 1 in both shapes must be equal: shape[0] = [32,1024] vs. shape[1037] = [32,1408] [Op:ConcatV2] name: concat
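If I read the message correctly, predict fails while stitching its per-batch outputs back together along the batch axis: batch 0 produced a [32, 1024] output but batch 1037 produced [32, 1408], i.e. the model's output width varies from batch to batch. TextVectorization with output_mode="int" pads each batch only to that batch's longest sequence, so any branch that flattens the sequence dimension ends up with a batch-dependent width. The same error can be reproduced with a bare tf.concat (a minimal sketch, independent of my data):

import tensorflow as tf

a = tf.zeros([32, 1024])   # stand-in for one batch's output
b = tf.zeros([32, 1408])   # stand-in for a batch whose output is wider
tf.concat([a, b], axis=0)  # raises InvalidArgumentError from ConcatOp: dimension 1 must match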
Source: Stack Overflow (licensed under CC BY-SA 3.0)