
ValueError: Target size (torch.Size([1, 1])) must be the same as input size (torch.Size([1, 2]))

While trying to train my model with 2 targets, I get the error... "ValueError: Target size (torch.Size([1, 1])) must be the same as input size (torch.Size([1, 2]))". I have a dataset with 2 targets. I have tried a lot, even resizing the tensors, but nothing works. Also, if I set output_dim = 1, it always predicts the same one of the two classes.
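
For context, the mismatch can be reproduced in isolation: BCEWithLogitsLoss requires the target tensor to have exactly the same shape as the logits it receives, so a [1, 2] model output paired with a [1, 1] label raises exactly this error. A minimal, self-contained sketch of that shape requirement (standalone, not part of the original code):

import torch
import torch.nn as nn

loss_func = nn.BCEWithLogitsLoss()

logits = torch.randn(1, 2)              # model output: [batch, output_dim] = [1, 2]
bad_target = torch.tensor([[1.0]])      # label of shape [1, 1]
# loss_func(logits, bad_target)         # raises: Target size ([1, 1]) must be the same as input size ([1, 2])

good_target = torch.tensor([[0.0, 1.0]])    # one target value per logit -> shape [1, 2]
print(loss_func(logits, good_target))       # works once the shapes match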

** Loading the training data

class SwelltrainDataset(T.utils.data.Dataset):

  def __init__(self, Swelltrain):
    sc = StandardScaler()

    X_tr = sc.fit_transform(X_train)
    Y_tr = y_train

    self.X_tr = torch.tensor(X_tr, dtype=torch.float32)
    self.Y_tr = torch.tensor(Y_tr, dtype=torch.float32)

  def __len__(self):
    return len(self.Y_tr)

  def __getitem__(self, idx):
    return self.X_tr[idx], self.Y_tr[idx]

train_ds = SwelltrainDataset(Swelltrain)

bat_size = 1
idx = np.append(np.where(train_ds.Y_tr == 0)[0], 
                np.where(train_ds.Y_tr == 1)[0],
                )

train_ds.X_tr = train_ds.X_tr[idx]
train_ds.Y_tr = train_ds.Y_tr[idx]

train_ldr = T.utils.data.DataLoader(train_ds,
    batch_size=bat_size, shuffle=True)
batch = next(iter(train_ldr))

I am using an LSTM model with the dimensions: input_dim = 16, hidden_dim = 100, layer_dim = 1, output_dim = 2

class LSTMModel(nn.Module):
    def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):
        super(LSTMModel, self).__init__()
        self.hidden_dim = hidden_dim
        
        self.layer_dim = layer_dim

        self.lstm = nn.LSTM(input_dim, hidden_dim, layer_dim, dropout=1, batch_first=True, )
      
        self.fc = nn.Linear(hidden_dim, output_dim)
       
    def forward(self, x):
        h0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim).requires_grad_()

        c0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim).requires_grad_()
        
        x, (hn, cn) = self.lstm(x, (h0.detach(), c0.detach()))
       
        x = self.fc(x[:, -1, :]) 
        return (x)   

** Model training

model = LSTMModel(input_dim=16, hidden_dim=100, layer_dim=1, output_dim=2)   # dimensions stated above

optimizer = optim.Adam(model.parameters(), lr=0.001)

loss_func = nn.BCEWithLogitsLoss()

epochs = 2
loss_list = []

model.train()

for epoch in range(epochs):
    total_loss = []
    
    for X_tr, Y_tr in train_ldr:
  
      X_tr = X_tr.unsqueeze(1)
      
     
      Y_tr = Y_tr.type(torch.LongTensor)
      Y_tr = Y_tr.unsqueeze(1)
      
      optimizer.zero_grad()
      output = model(X_tr.float())

      pred = output.argmax(dim=1, keepdim=True)

      loss = loss_func(output, Y_tr.float())
         
      loss.backward()
       
      optimizer.step()
        
      total_loss.append(loss.item())

    loss_list.append(sum(total_loss)/len(total_loss))
    print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
         100. * (epoch + 1) / epochs, loss_list[-1]))
ValueError                                Traceback (most recent call last)
<ipython-input-30-1ab26e6f45d7> in <module>
     31 
     32 #      print(Y_tr.size())
---> 33       loss = loss_func(output, Y_tr.float())
     34 
     35         # Backward pass

~\anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
    887             result = self._slow_forward(*input, **kwargs)
    888         else:
--> 889             result = self.forward(*input, **kwargs)
    890         for hook in itertools.chain(
    891                 _global_forward_hooks.values(),

~\anaconda3\lib\site-packages\torch\nn\modules\loss.py in forward(self, input, target)
    712         assert self.weight is None or isinstance(self.weight, Tensor)
    713         assert self.pos_weight is None or isinstance(self.pos_weight, Tensor)
--> 714         return F.binary_cross_entropy_with_logits(input, target,
    715                                                   self.weight,
    716                                                   pos_weight=self.pos_weight,

~\anaconda3\lib\site-packages\torch\nn\functional.py in binary_cross_entropy_with_logits(input, target, weight, size_average, reduce, reduction, pos_weight)
   2825 
   2826     if not (target.size() == input.size()):
-> 2827         raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
   2828 
   2829     return torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction_enum)

ValueError: Target size (torch.Size([1, 1])) must be the same as input size (torch.Size([1, 2]))

Try converting your Y_tr variable to a 2-class one-hot label:

one_hot_label = torch.nn.functional.one_hot(Y_tr.to(torch.int64), 2)

one_hot_label = one_hot_label.float()

This should give the target the shape torch.Size([1, 2]).
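
Below is a sketch of how that conversion could slot into the training loop from the question, reusing the model, optimizer, loss_func, and train_ldr defined above; the Y_tr.unsqueeze(1) step is dropped because one_hot already produces the required [batch, 2] shape. This is an illustration under those assumptions, not the only possible fix.

import torch
import torch.nn.functional as F

for X_tr, Y_tr in train_ldr:
    X_tr = X_tr.unsqueeze(1)          # [batch, 1, 16] for the batch_first LSTM

    # Y_tr arrives as [batch] with values 0 or 1; one_hot turns it into [batch, 2]
    one_hot_label = F.one_hot(Y_tr.to(torch.int64), num_classes=2).float()

    optimizer.zero_grad()
    output = model(X_tr.float())                 # [batch, 2] logits
    loss = loss_func(output, one_hot_label)      # shapes now match: [batch, 2] vs [batch, 2]
    loss.backward()
    optimizer.step()

As a design note, pairing one-hot targets with BCEWithLogitsLoss treats the two classes as independent binary outputs; the usual alternative for single-label two-class data is nn.CrossEntropyLoss with integer class indices, which changes the loss rather than the target shape.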
