ValueError: not enough values to unpack (expected 2, got 1) when training model
I am trying to train an autoencoder model using PyTorch Lightning, but I am getting this error: `ValueError: not enough values to unpack (expected 2, got 1)`.
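For context, the exception itself is just Python's tuple-unpacking error: it is raised whenever two names are expected on the left-hand side but only one value is produced on the right. A minimal standalone reproduction (not taken from my training code, only to show where the message comes from):

```python
# Unpacking a 1-element tuple into two names raises the same error message.
try:
    x, y = (1,)
except ValueError as e:
    print(e)  # not enough values to unpack (expected 2, got 1)
```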
This is how my data looks:

```python
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, random_split

data = np.random.rand(400, 46, 55, 46)

# Datasets initialization
ds = TensorDataset(torch.from_numpy(data))
train_ds, valid_ds = random_split(ds, (350, 50))

# Dataloader wrappers
train_dl, valid_dl = DataLoader(train_ds), DataLoader(valid_ds)
```
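For debugging, a quick sketch like this shows what a single batch from the loader actually contains (this snippet is illustrative, not part of my original script):

```python
# Diagnostic sketch: peek at one batch from the training DataLoader.
# TensorDataset was built from a single tensor, so each sample is a 1-element tuple,
# and the default collate function turns a batch into a list with one tensor in it.
batch = next(iter(train_dl))
print(type(batch), len(batch))  # <class 'list'> 1
print(batch[0].shape)           # torch.Size([1, 46, 55, 46]) with the default batch_size=1
```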
And this is the model:

```python
import torch
from torch import nn
import torch.nn.functional as F
import pytorch_lightning as pl


class AutoEncoder(pl.LightningModule):
    def __init__(self):
        super(AutoEncoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(46 * 55 * 46, 400),
            nn.Tanh())
        self.decoder = nn.Sequential(
            nn.Linear(400, 46 * 55 * 46),
            nn.Sigmoid())

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
        return optimizer

    def training_step(self, train_batch, batch_idx):
        x, y = train_batch
        x = x.view(x.size(0), -1)
        z = self.encoder(x)
        x_hat = self.decoder(z)
        loss = F.mse_loss(x_hat, x)
        self.log('train_loss', loss)
        return loss

    def validation_step(self, val_batch, batch_idx):
        x, y = val_batch
        x = x.view(x.size(0), -1)
        z = self.encoder(x)
        x_hat = self.decoder(z)
        loss = F.mse_loss(x_hat, x)
        self.log('val_loss', loss)


model = AutoEncoder()
```
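The error is raised while fitting. I have not pasted my exact training call; a standard PyTorch Lightning invocation along these lines (the Trainer arguments here are assumptions, not copied from my script) triggers it inside `training_step`:

```python
import pytorch_lightning as pl

# Assumed training setup: a plain Trainer fitted on the two loaders defined above.
trainer = pl.Trainer(max_epochs=10)
trainer.fit(model, train_dl, valid_dl)
```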
Sources
This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.
Source: Stack Overflow