Input to reshape is a tensor with 3072 values, but the requested shape requires a multiple of 4000 in CapsNet
I was using a CapsNet-GRU method, but I get this error:

Input to reshape is a tensor with 3072 values, but the requested shape requires a multiple of 4000
[[{{node model_5/primarycap_reshape/Reshape}}]] [Op:__inference_train_function_21550]

This is my capsule layer:
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import initializers, layers
class Length(layers.Layer):
"""
    Compute the length of vectors. This is used to compute a Tensor that has the same shape as y_true in margin_loss.
    Using this layer as the model's output allows predicting labels directly via y_pred = np.argmax(model.predict(x), 1).
inputs: shape=[None, num_vectors, dim_vector]
output: shape=[None, num_vectors]
"""
def call(self, inputs, **kwargs):
print("(tf.sqrt(tf.reduce_sum(tf.square(inputs), -1) + K.epsilon())",tf.sqrt(tf.reduce_sum(tf.square(inputs), -1) + K.epsilon()))
return tf.sqrt(tf.reduce_sum(tf.square(inputs), -1) + K.epsilon())
def compute_output_shape(self, input_shape):
print("input_shape[:-1] : ",input_shape[:-1])
return input_shape[:-1]
def get_config(self):
config = super(Length, self).get_config()
print("get_config : ",config)
return config
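# Illustrative check (added for clarity, not part of the original question's code):
# Length maps [batch, num_capsule, dim_capsule] to per-capsule norms of shape
# [batch, num_capsule]. For example, each row of 16 ones has norm sqrt(16) = 4:
# _lengths = Length()(tf.ones((2, 10, 16)))  # shape (2, 10), values ~4.0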
class Mask(layers.Layer):
"""
Mask a Tensor with shape=[None, num_capsule, dim_vector] either by the capsule with max length or by an additional
input mask. Except the max-length capsule (or specified capsule), all vectors are masked to zeros. Then flatten the
masked Tensor.
For example:
        x = layers.Input(shape=[8, 3, 2])  # batch_size=8, each sample contains 3 capsules with dim_vector=2
        y = layers.Input(shape=[8, 3])     # true labels: 8 samples, 3 classes, one-hot coded
        out = Mask()(x)        # out.shape=[8, 6]
        # or
        out2 = Mask()([x, y])  # out2.shape=[8, 6], masked with the true labels y; y can also be manipulated
"""
def call(self, inputs, **kwargs):
if type(inputs) is list: # true label is provided with shape = [None, n_classes], i.e. one-hot code.
assert len(inputs) == 2
inputs, mask = inputs
else: # if no true label, mask by the max length of capsules. Mainly used for prediction
# compute lengths of capsules
x = tf.sqrt(tf.reduce_sum(tf.square(inputs), -1))
# generate the mask which is a one-hot code.
# mask.shape=[None, n_classes]=[None, num_capsule]
mask = tf.one_hot(indices=tf.argmax(x, 1), depth=x.shape[1])
# inputs.shape=[None, num_capsule, dim_capsule]
# mask.shape=[None, num_capsule]
# masked.shape=[None, num_capsule * dim_capsule]
masked = K.batch_flatten(inputs * tf.expand_dims(mask, -1))
print(masked)
return masked
def compute_output_shape(self, input_shape):
if type(input_shape[0]) is tuple: # true label provided
print("input_shape[0] : ",input_shape[0])
return tuple([None, input_shape[0][1] * input_shape[0][2]])
else: # no true label provided
return tuple([None, input_shape[1] * input_shape[2]])
def get_config(self):
config = super(Mask, self).get_config()
print("config : ",config)
return config
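# Illustrative check (added for clarity, not part of the original question's code):
# with no label input, Mask keeps only the longest capsule per sample and flattens,
# so [batch, num_capsule, dim_capsule] becomes [batch, num_capsule * dim_capsule]:
# _masked = Mask()(tf.random.normal((2, 10, 16)))  # shape (2, 160)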
def squash(vectors, axis=-1):
    """
    The non-linear activation used in capsules. It drives the length of a large vector to near 1 and a small vector to 0.
    :param vectors: some vectors to be squashed, N-dim tensor
    :param axis: the axis to squash
    :return: a Tensor with the same shape as the input vectors
    """
    s_squared_norm = tf.reduce_sum(tf.square(vectors), axis, keepdims=True)
    scale = s_squared_norm / (1 + s_squared_norm) / tf.sqrt(s_squared_norm + K.epsilon())
    result = scale * vectors
    print("result : ", result)
    return result
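# Numeric intuition (added for clarity): squash rescales a vector s by
# ||s||^2 / (1 + ||s||^2) / ||s||, so long vectors approach unit length while
# short ones shrink toward zero:
# tf.norm(squash(tf.constant([[100.0, 0.0]])))  # ~0.99990
# tf.norm(squash(tf.constant([[0.1, 0.0]])))    # ~0.0099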
class CapsuleLayer(layers.Layer):
"""
    The capsule layer. It is similar to a Dense layer: a Dense layer has in_num inputs, each a scalar (the output of a
    neuron in the previous layer), and out_num output neurons. CapsuleLayer just expands the output of each neuron from
    a scalar to a vector, so its input shape = [None, input_num_capsule, input_dim_capsule] and output shape =
    [None, num_capsule, dim_capsule]. For a Dense layer, input_dim_capsule = dim_capsule = 1.
:param num_capsule: number of capsules in this layer
:param dim_capsule: dimension of the output vectors of the capsules in this layer
:param routings: number of iterations for the routing algorithm
"""
    def __init__(self, num_capsule, dim_capsule, routings=3,
kernel_initializer='glorot_uniform',
**kwargs):
        super(CapsuleLayer, self).__init__(**kwargs)
self.num_capsule = num_capsule
self.dim_capsule = dim_capsule
self.routings = routings
self.kernel_initializer = initializers.get(kernel_initializer)
def build(self, input_shape):
assert len(input_shape) >= 3, "The input Tensor should have shape=[None, input_num_capsule, input_dim_capsule]"
self.input_num_capsule = input_shape[1]
self.input_dim_capsule = input_shape[2]
# Transform matrix, from each input capsule to each output capsule, there's a unique weight as in Dense layer.
self.W = self.add_weight(shape=[self.num_capsule, self.input_num_capsule,
self.dim_capsule, self.input_dim_capsule],
initializer=self.kernel_initializer,
name='W')
self.built = True
def call(self, inputs, training=None):
# inputs.shape=[None, input_num_capsule, input_dim_capsule]
# inputs_expand.shape=[None, 1, input_num_capsule, input_dim_capsule, 1]
inputs_expand = tf.expand_dims(tf.expand_dims(inputs, 1), -1)
print("inputs_expand : ",inputs_expand)
# Replicate num_capsule dimension to prepare being multiplied by W
# inputs_tiled.shape=[None, num_capsule, input_num_capsule, input_dim_capsule, 1]
inputs_tiled = tf.tile(inputs_expand, [1, self.num_capsule, 1, 1, 1])
print("inputs_tiled : ",inputs_tiled)
# Compute `inputs * W` by scanning inputs_tiled on dimension 0.
# W.shape=[num_capsule, input_num_capsule, dim_capsule, input_dim_capsule]
# x.shape=[num_capsule, input_num_capsule, input_dim_capsule, 1]
# Regard the first two dimensions as `batch` dimension, then
# matmul(W, x): [..., dim_capsule, input_dim_capsule] x [..., input_dim_capsule, 1] -> [..., dim_capsule, 1].
# inputs_hat.shape = [None, num_capsule, input_num_capsule, dim_capsule]
        # squeeze only the trailing size-1 dim so a batch of size 1 is not collapsed as well
        inputs_hat = tf.squeeze(tf.map_fn(lambda x: tf.matmul(self.W, x), elems=inputs_tiled), axis=-1)
print("inputs_hat : ",inputs_hat)
# Begin: Routing algorithm ---------------------------------------------------------------------#
# The prior for coupling coefficient, initialized as zeros.
# b.shape = [None, self.num_capsule, 1, self.input_num_capsule].
        # Use the dynamic batch size: inputs.shape[0] is None while the model is being traced,
        # which makes tf.zeros fail inside tf.function.
        b = tf.zeros(shape=[tf.shape(inputs)[0], self.num_capsule, 1, self.input_num_capsule])
print("b : ",b)
assert self.routings > 0, 'The routings should be > 0.'
for i in range(self.routings):
# c.shape=[batch_size, num_capsule, 1, input_num_capsule]
c = tf.nn.softmax(b, axis=1)
# c.shape = [batch_size, num_capsule, 1, input_num_capsule]
# inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
# The first two dimensions as `batch` dimension,
            # then matmul: [..., 1, input_num_capsule] x [..., input_num_capsule, dim_capsule] -> [..., 1, dim_capsule].
# outputs.shape=[None, num_capsule, 1, dim_capsule]
outputs = squash(tf.matmul(c, inputs_hat)) # [None, 10, 1, 16]
if i < self.routings - 1:
# outputs.shape = [None, num_capsule, 1, dim_capsule]
# inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
# The first two dimensions as `batch` dimension, then
                # matmul: [..., 1, dim_capsule] x [..., input_num_capsule, dim_capsule]^T -> [..., 1, input_num_capsule].
# b.shape=[batch_size, num_capsule, 1, input_num_capsule]
b += tf.matmul(outputs, inputs_hat, transpose_b=True)
# End: Routing algorithm -----------------------------------------------------------------------#
        # remove the size-1 axis explicitly so a batch of size 1 keeps its batch dim
        return tf.squeeze(outputs, axis=2)
def compute_output_shape(self, input_shape):
return tuple([None, self.num_capsule, self.dim_capsule])
def get_config(self):
config = {
'num_capsule': self.num_capsule,
'dim_capsule': self.dim_capsule,
'routings': self.routings
}
base_config = super(CapsuleLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
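# Typical usage (added for clarity; the layer name and sizes are assumptions, not from the question):
# the layer maps [None, input_num_capsule, input_dim_capsule] to [None, num_capsule, dim_capsule].
# digitcaps = CapsuleLayer(num_capsule=10, dim_capsule=16, routings=3,
#                          name='digitcaps')(primarycaps)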
def PrimaryCap(inputs, dim_capsule, n_channels, kernel_size, strides, padding):
"""
    Apply Conv2D n_channels times and concatenate all capsules.
:param inputs: 4D tensor, shape=[None, width, height, channels]
:param dim_capsule: the dim of the output vector of capsule
:param n_channels: the number of types of capsules
:return: output tensor, shape=[None, num_capsule, dim_capsule]
"""
output = layers.Conv2D(filters=dim_capsule*n_channels, kernel_size=kernel_size, strides=strides, padding=padding,
name='primarycap_conv2d')(inputs)
outputs = layers.Reshape(target_shape=[-1, dim_capsule], name='primarycap_reshape')(output)
print("output prim : ",outputs)
return layers.Lambda(squash, name='primarycap_squash')(outputs)
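For reference, an error like this usually means the number of elements coming out of primarycap_conv2d per sample does not divide evenly into the shape requested by primarycap_reshape, or by a later layer that assumes a fixed num_capsule * dim_capsule (e.g. 500 * 8 = 4000). A minimal way to inspect the actual sizes; the input shape and capsule settings below are placeholders, not values from the question:

from tensorflow.keras import layers, models

inp = layers.Input(shape=(16, 16, 12))  # placeholder; use your real feature-map shape
caps = PrimaryCap(inp, dim_capsule=8, n_channels=32, kernel_size=9, strides=2, padding='valid')
models.Model(inp, caps).summary()
# The product of the conv output's non-batch dims must be divisible by dim_capsule,
# and must equal num_capsule * dim_capsule wherever a fixed capsule count is assumed.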
Sources
Source: Stack Overflow, licensed under CC BY-SA 3.0.