RuntimeError: 1D target tensor expected, multi-target not supported

I am working with a CNN and I get the following error on the line loss = criterion(outputs, data_y):

Here is the relevant code snippet:

def run(model, X_train, Y_train, X_test, Y_test, learning_rate=0.01,
          num_epochs=100, minibatch_size=8, print_cost=True):

    seed = 0                                         # to keep results consistent (numpy seed)
    (m, n_H0, n_W0, n_C0) = X_train.shape                                  
    costs = []                                       # To keep track of the cost
    
    criterion = nn.NLLLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
     
    # Training loop
    model.train() # Turn on the training mode
    for epoch in range(num_epochs):

        minibatch_cost = 0.
        num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
        seed = seed + 1
        minibatches = utils.generate_minibatch(X_train, Y_train, minibatch_size, seed)

        for minibatch in minibatches:
            (batch_x, batch_y) = minibatch
            data_x = torch.Tensor(batch_x)
            data_y = torch.LongTensor(batch_x)
            
            ### START YOUR CODE ### 
            # Zero the gradients
            optimizer.zero_grad() # Hint: call zero_grad()
            
            # Forward pass and compute loss
            outputs = model(data_x) # Hint: use model as a callable
            loss = criterion(outputs, data_y)  # Hint: use criterion as a callable
            
            # Backward and optimize
            loss.backward() # Hint: call backward()
            optimizer.step()  # Hint: call step()
            ### END YOUR CODE ###
            
            minibatch_cost += loss.item()
        
        # Print the cost every epoch
        minibatch_cost /= num_minibatches
        if print_cost and epoch % 5 == 0:
            print ("Cost after epoch %i: %f" % (epoch, minibatch_cost))
        costs.append(minibatch_cost)

    # Calculate accuracy on the train and test datasets
    data_x = torch.Tensor(X_test)
    data_y = torch.LongTensor(Y_test)
    model.eval() # Turn on the evaluation mode
    with torch.no_grad():
        test_pred = model(data_x)
        num_correct = (torch.argmax(test_pred, dim=1).view(data_y.size()).data == data_y.data).float().sum()
        test_acc = num_correct / test_pred.size()[0]
    print("Test Accuracy:", test_acc.item())


model = CNN_Model()

torch.manual_seed(0)
run(model, X_train, Y_train, X_test, Y_test)

And here is the error I'm getting:

RuntimeError                              Traceback (most recent call last)
<ipython-input-6-9839fc42e5c2> in <module>
      3 
      4 torch.manual_seed(0)
----> 5 run(model, X_train, Y_train, X_test, Y_test)
      6 
      7 # NOTE: It could be slow to run 100 epochs. Make sure that your costs for after each epoch

<ipython-input-5-05ddcdc9ddf5> in run(model, X_train, Y_train, X_test, Y_test, learning_rate, num_epochs, minibatch_size, print_cost)
     40             # Forward pass and compute loss
     41             outputs = model(data_x) # Hint: use model as a callable
---> 42             loss = criterion(outputs, data_y)  # Hint: use criterion as a callable
     43 
     44             # Backward and optimize

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
    887             result = self._slow_forward(*input, **kwargs)
    888         else:
--> 889             result = self.forward(*input, **kwargs)
    890         for hook in itertools.chain(
    891                 _global_forward_hooks.values(),

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\loss.py in forward(self, input, target)
    214     def forward(self, input: Tensor, target: Tensor) -> Tensor:
    215         assert self.weight is None or isinstance(self.weight, Tensor)
--> 216         return F.nll_loss(input, target, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction)
    217 
    218 

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\functional.py in nll_loss(input, target, weight, size_average, ignore_index, reduce, reduction)
   2383         )
   2384     if dim == 2:
-> 2385         ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
   2386     elif dim == 4:
   2387         ret = torch._C._nn.nll_loss2d(input, target, weight, _Reduction.get_enum(reduction), ignore_index)

RuntimeError: 1D target tensor expected, multi-target not supported

This error usually appears when you pass a one-hot-encoded target to CrossEntropyLoss or NLLLoss instead of a 1D tensor of class indices, but in your case the cause is simpler. You just have a typo here:

data_y = torch.LongTensor(batch_x) # <- should be `batch_y`
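
Because of that typo, the target passed to criterion is the 4D image batch batch_x rather than the 1D label vector batch_y, which is exactly the "multi-target" shape NLLLoss rejects. For reference, here is a minimal, self-contained sketch (with made-up shapes and tensors, not taken from your post) of the target shape NLLLoss expects, the one-hot shape that reproduces the error, and how to convert a one-hot target back to class indices:

import torch
import torch.nn as nn
import torch.nn.functional as F

criterion = nn.NLLLoss()

# Toy batch: 4 samples, 3 classes (illustrative shapes only)
log_probs = F.log_softmax(torch.randn(4, 3), dim=1)

# NLLLoss expects a 1D LongTensor of class indices, shape (N,)
target_indices = torch.tensor([0, 2, 1, 2])
loss = criterion(log_probs, target_indices)        # works

# A one-hot (N, C) target reproduces the error from the question
target_one_hot = F.one_hot(target_indices, num_classes=3)
# criterion(log_probs, target_one_hot)             # RuntimeError: 1D target tensor expected, ...

# If your labels are stored one-hot, convert them back to indices first
loss = criterion(log_probs, target_one_hot.argmax(dim=1))

In your run function that means data_y should be built from batch_y, the 1D vector of class labels (data_y = torch.LongTensor(batch_y)), while data_x keeps holding the images.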
