RuntimeError: Given input size: (512x1x3). Calculated output size: (512x0x1)

Can somebody guide me on how to resolve this error, please? I am using a pretrained VGG model and I am trying to train on patches of size 62 x 62. The following error occurs, and I think it is related to a pooling layer, but I cannot figure out what I need to change.


    Classifier(
      (layer): Sequential(
        (0): Linear(in_features=45, out_features=5, bias=True)
      )
    )
    torch.Size([50, 1, 62, 62])    

    
    ---------------------------------------------------------------------------
    RuntimeError                              Traceback (most recent call last)
    File /data/heart_ct/torch/ct_pretrained.py:196, in <module>
        194 for epoch in range(num_epochs):
        195     start_time = time.time()
    --> 196     loss, metric = train(model, train_loader, optimizer)
        197     val_loss, val_metric = validate(model, val_loader)
        198     scheduler.step()
    
    File /data/heart_ct/torch/ct_pretrained.py:53, in train(model, loader, optimizer)
         50 axial, sagittal, coronal, emr, target = axial.cuda(), sagittal.cuda(), coronal.cuda(), emr.cuda(), target.cuda()
         52 optimizer.zero_grad()
    ---> 53 output = model(axial, sagittal, coronal, emr)
         54 loss = loss_fn(output, target)
         55 loss.backward()
    
    File ~/miniconda/lib/python3.8/site-packages/torch/nn/modules/module.py:727, in Module._call_impl(self, *input, **kwargs)
        725     result = self._slow_forward(*input, **kwargs)
        726 else:
    --> 727     result = self.forward(*input, **kwargs)
        728 for hook in itertools.chain(
        729         _global_forward_hooks.values(),
        730         self._forward_hooks.values()):
        731     hook_result = hook(self, input, result)
    
    File /data/heart_ct/torch/models/vggNet.py:82, in vggNet.forward(self, axial, sagittal, coronal, emr)
         75 print(axial.shape)
         80 axial_feature = self.axial_model(axial)
    ---> 82 sagittal_feature = self.sa_co_model(sagittal)
         84 coronal_feature = self.sa_co_model(coronal)
         86 out = torch.cat([axial_feature, sagittal_feature, coronal_feature], dim=1)
    
    File ~/miniconda/lib/python3.8/site-packages/torch/nn/modules/module.py:727, in Module._call_impl(self, *input, **kwargs)
        725     result = self._slow_forward(*input, **kwargs)
        726 else:
    --> 727     result = self.forward(*input, **kwargs)
        728 for hook in itertools.chain(
        729         _global_forward_hooks.values(),
        730         self._forward_hooks.values()):
        731     hook_result = hook(self, input, result)
    
    File ~/miniconda/lib/python3.8/site-packages/torchvision/models/vgg.py:43, in VGG.forward(self, x)
         42 def forward(self, x):
    ---> 43     x = self.features(x)
         44     x = self.avgpool(x)
         45     x = torch.flatten(x, 1)
    
    File ~/miniconda/lib/python3.8/site-packages/torch/nn/modules/module.py:727, in Module._call_impl(self, *input, **kwargs)
        725     result = self._slow_forward(*input, **kwargs)
        726 else:
    --> 727     result = self.forward(*input, **kwargs)
        728 for hook in itertools.chain(
        729         _global_forward_hooks.values(),
        730         self._forward_hooks.values()):
        731     hook_result = hook(self, input, result)
    
    File ~/miniconda/lib/python3.8/site-packages/torch/nn/modules/container.py:117, in Sequential.forward(self, input)
        115 def forward(self, input):
        116     for module in self:
    --> 117         input = module(input)
        118     return input
    
    File ~/miniconda/lib/python3.8/site-packages/torch/nn/modules/module.py:727, in Module._call_impl(self, *input, **kwargs)
        725     result = self._slow_forward(*input, **kwargs)
        726 else:
    --> 727     result = self.forward(*input, **kwargs)
        728 for hook in itertools.chain(
        729         _global_forward_hooks.values(),
        730         self._forward_hooks.values()):
        731     hook_result = hook(self, input, result)
    
    File ~/miniconda/lib/python3.8/site-packages/torch/nn/modules/pooling.py:153, in MaxPool2d.forward(self, input)
        152 def forward(self, input: Tensor) -> Tensor:
    --> 153     return F.max_pool2d(input, self.kernel_size, self.stride,
        154                         self.padding, self.dilation, self.ceil_mode,
        155                         self.return_indices)
    
    File ~/miniconda/lib/python3.8/site-packages/torch/_jit_internal.py:267, in boolean_dispatch.<locals>.fn(*args, **kwargs)
        265     return if_true(*args, **kwargs)
        266 else:
    --> 267     return if_false(*args, **kwargs)
    
    File ~/miniconda/lib/python3.8/site-packages/torch/nn/functional.py:585, in _max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode, return_indices)
        583 if stride is None:
        584     stride = torch.jit.annotate(List[int], [])
    --> 585 return torch.max_pool2d(
        586     input, kernel_size, stride, padding, dilation, ceil_mode)
    
    RuntimeError: Given input size: (512x1x3). Calculated output size: (512x0x1). Output size is too small
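If I read the error right, the last 2x2 max pool in VGG's features receives a 512x1x3 map: the output height works out to floor((1 - 2) / 2) + 1 = 0, which is why it complains that the output size is too small. The snippet below is only my own standalone check, not part of the training script, but it reproduces the same error in isolation:

    import torch
    import torch.nn.functional as F

    # A 2x2 max pool with stride 2 on a 1x3 feature map: the height dimension
    # works out to floor((1 - 2) / 2) + 1 = 0, so PyTorch raises the same
    # "Output size is too small" RuntimeError seen in the traceback above.
    x = torch.randn(1, 512, 1, 3)
    F.max_pool2d(x, kernel_size=2, stride=2)

So it seems the patches shrink below the 2x2 pooling kernel somewhere in the downsampling stack before the last pool.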

This is my model architecture. I have added/changed some layers because I am working with 3D images consisting of axial/sagittal/coronal views, while my model is a 2D CNN.


    import torch
    import torch.nn as nn
    from torchvision import models

    __all__ = ['Vgg']

    class Vgg(nn.Module):
        def __init__(self, is_emr=False, mode='sum'):
            super().__init__()
            self.is_emr = is_emr
            self.mode = mode
            in_dim = 45

            # axial branch: pretrained VGG13 with a single-channel 7x7 stem for grayscale slices
            self.axial_model = models.vgg13(pretrained=True)
            out_channels = self.axial_model.features[0].out_channels
            self.axial_model.features[0] = nn.Conv2d(1, out_channels, kernel_size=7, stride=1, padding=0, bias=False)
            self.axial_model.features[3] = nn.MaxPool2d(1)  # kernel-size-1 pool (keeps the spatial size)
            num_ftrs = self.axial_model.classifier[6].in_features
            self.axial_model.classifier[6] = nn.Linear(num_ftrs, 15)

            # shared branch for the sagittal and coronal views
            self.sa_co_model = models.vgg13(pretrained=True)
            self.sa_co_model.features[0] = nn.Conv2d(1, out_channels, kernel_size=7, stride=1, padding=(3,0), bias=False)
            self.sa_co_model.features[3] = nn.MaxPool2d(1)
            num_ftrs = self.sa_co_model.classifier[6].in_features
            self.sa_co_model.classifier[6] = nn.Linear(num_ftrs, 15)

            if self.is_emr:
                self.emr_model = EMRModel()
                if self.mode == 'concat': in_dim = 90

            self.classifier = Classifier(in_dim)
            #print(self.classifier)

        def forward(self, axial, sagittal, coronal, emr):
            #print(axial.shape)
            # crop 3 pixels off the spatial dims before the per-view branches
            axial = axial[:,:,:-3,:-3]
            sagittal = sagittal[:,:,:,:-3]
            coronal = coronal[:,:,:,:-3]

            axial_feature = self.axial_model(axial)
            sagittal_feature = self.sa_co_model(sagittal)
            coronal_feature = self.sa_co_model(coronal)
            out = torch.cat([axial_feature, sagittal_feature, coronal_feature], dim=1)
            out = self.classifier(out)

            if self.is_emr:
                emr_feature = self.emr_model(emr)
                out += emr_feature

            return axial_feature

    class EMRModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.layer = nn.Sequential(
                nn.Linear(7, 256),
                nn.BatchNorm1d(256),
                nn.LeakyReLU(negative_slope=0.2),
                nn.Dropout(p=0.2, inplace=True),
                nn.Linear(256, 256),
                nn.BatchNorm1d(256),
                nn.LeakyReLU(negative_slope=0.2),
                nn.Dropout(p=0.2, inplace=True),
                nn.Linear(256, 5),
            )

        def forward(self, x):
            return self.layer(x)

    class Classifier(nn.Module):
        def __init__(self, in_dim):
            super().__init__()
            self.layer = nn.Sequential(
                nn.Linear(in_dim, 5)
            )

        def forward(self, x):
            return self.layer(x)

    class ConvBN(nn.Module):
        def __init__(self, in_dim, out_dim, **kwargs):
            super().__init__()
            self.layer = nn.Sequential(
                nn.Conv2d(in_dim, out_dim, bias=False, **kwargs),
                nn.BatchNorm2d(out_dim),
                nn.LeakyReLU(negative_slope=0.2))

        def forward(self, x):
            return self.layer(x)

    '''
    if __name__ == "__main__":
        images = torch.randn(2,1,65,65)
        model = Vgg()
        out = model(images,images,images)

        model = models.vgg16(pretrained=True)
        for k, v in model.state_dict().items():
            print(k)
    '''
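To see where the spatial size collapses, I can trace the shape after every layer of the modified branch and spot which MaxPool2d receives a map smaller than its 2x2 kernel. This is only a diagnostic sketch: the 1x1x62x59 input is my guess at what the sagittal patch looks like after the [:, :, :, :-3] crop, and in practice I would feed the real tensor from the loader instead.

    import torch
    import torch.nn as nn
    from torchvision import models

    # Rebuild the sagittal/coronal branch the same way as in __init__ above
    # (pretrained=False here just to skip downloading weights for a shape check).
    m = models.vgg13(pretrained=False)
    m.features[0] = nn.Conv2d(1, 64, kernel_size=7, stride=1, padding=(3, 0), bias=False)
    m.features[3] = nn.MaxPool2d(1)

    x = torch.randn(1, 1, 62, 59)  # assumed sagittal patch after the crop in forward()
    for i, layer in enumerate(m.features):
        x = layer(x)
        print(i, layer.__class__.__name__, tuple(x.shape))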


Source: Stack Overflow, licensed under CC BY-SA 3.0.