
RuntimeError: expected scalar type Long but found Float (Pytorch)

I have tried many times to fix this, and I also used the example code from functional.py, but I still get the same error at the 'loss' line. How can I solve this?

My libraries


    import matplotlib.pyplot as plt
    import torch
    import torch.nn as nn
    import numpy as np
    import matplotlib
    import pandas as pd
    from torch.autograd import Variable
    from torch.utils.data import DataLoader,TensorDataset
    from sklearn.model_selection import train_test_split
    import warnings
    import os
    import torchvision
    import torchvision.datasets as dsets
    import torchvision.transforms as transforms

The MNIST dataset



    
    train=pd.read_csv("train.csv",dtype=np.float32)
    
    
    targets_numpy = train.label.values
    features_numpy = train.loc[:,train.columns != "label"].values/255 # normalization
    
    
    features_train, features_test, targets_train, targets_test = train_test_split(features_numpy,
                                                                                 targets_numpy,test_size = 0.2,
                                                                                 random_state = 42) 
   
    featuresTrain=torch.from_numpy(features_train)
    targetsTrain=torch.from_numpy(targets_train)
    
    
    featuresTest=torch.from_numpy(features_test)
    targetsTest=torch.from_numpy(targets_test)     
    
    
    batch_size=100
    n_iterations=10000
    num_epochs=n_iterations/(len(features_train)/batch_size)
    num_epochs=int(num_epochs)
    
    
    train=torch.utils.data.TensorDataset(featuresTrain,targetsTrain) 
    test=torch.utils.data.TensorDataset(featuresTest,targetsTest)
    
    print(type(train))
    
    
    
    train_loader=DataLoader(train,batch_size=batch_size,shuffle=False)
    test_loader=DataLoader(test,batch_size=batch_size,shuffle=False)
    print(type(train_loader))
    
    plt.imshow(features_numpy[226].reshape(28,28))
    plt.axis("off")
    plt.title(str(targets_numpy[226]))
    plt.show()


Here is my model

  

    class ANNModel(nn.Module):  
      
      def __init__(self,input_dim,hidden_dim,output_dim):
        super(ANNModel,self).__init__()
    
        
        self.fc1=nn.Linear(input_dim,hidden_dim)
        
        self.relu1=nn.ReLU()
    
        
        self.fc2=nn.Linear(hidden_dim,hidden_dim)
        
        self.tanh2=nn.Tanh()
    
       
    
        
        self.fc4=nn.Linear(hidden_dim,output_dim)
    
      def forward (self,x): # the layers built above are connected here
        
        out=self.fc1(x)
        
        out=self.relu1(out)
    
        
        out=self.fc2(out)
        
        out=self.tanh2(out)
    
       
        
    
        
        out=self.fc4(out)
        return out  
      
    input_dim=28*28
    hidden_dim=150  
    output_dim=10 
    
    
    
    model=ANNModel(input_dim,hidden_dim,output_dim)
    
    
    error=nn.CrossEntropyLoss()
    
    
    learning_rate=0.02
    optimizer=torch.optim.SGD(model.parameters(),lr=learning_rate)

This is where the problem occurs

 

   
    count=0
    loss_list=[]
    iteration_list=[]
    accuracy_list = []
    for epoch in range(num_epochs):
      for i,(images,labels) in enumerate(train_loader):
        
        train=Variable(images.view(-1,28*28))
        labels=Variable(labels)
        #print(labels)
        #print(outputs)  
       
        optimizer.zero_grad()
    
        #forward propagation
        outputs=model(train)
    
       
       
        #outputs=torch.randn(784,10,requires_grad=True)
        ##labels=torch.randn(784,10).softmax(dim=1)
        loss=error(outputs,labels)
        
        
        
       
        loss.backward()
    
        
        optimizer.step()
        
        count+=1
         
        if count %50 ==0:
          
          
          correct=0
          total=0
          
         
          for images,labels in test_loader:
            test=Variable(images.view(-1,28*28))
    
            
            outputs=model(test)
    
            
        predicted=torch.max(outputs.data,1)[1] # index of the max logit = predicted class
    
           
            total+= len(labels)
    
            
            correct+=(predicted==labels).sum()
    
          accuracy=100  *correct/float(total)
         
          loss_list.append(loss.data)
          iteration_list.append(count)
          accuracy_list.append(accuracy)
          if  count %500 ==0 :
           
           print('Iteration: {}  Loss: {}  Accuracy: {} %'.format(count, loss.data, accuracy))


This gives



    ---------------------------------------------------------------------------
    RuntimeError                              Traceback (most recent call last)
    <ipython-input-9-9e53988ad250> in <module>()
         26     #outputs=torch.randn(784,10,requires_grad=True)
         27     ##labels=torch.randn(784,10).softmax(dim=1)
    ---> 28     loss=error(outputs,labels)
         29 
         30 
    
    2 frames
    /usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction, label_smoothing)
       2844     if size_average is not None or reduce is not None:
       2845         reduction = _Reduction.legacy_get_string(size_average, reduce)
    -> 2846     return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
       2847 
       2848 
    
    RuntimeError: expected scalar type Long but found Float

It seems that the dtype of the tensor 'labels' is FloatTensor, but nn.CrossEntropyLoss expects targets of type LongTensor. That means you should check the type of 'labels'. If that is the case, you should convert the dtype of 'labels' from FloatTensor to LongTensor, which you can do with the following code:

    loss=error(outputs,labels.long())

Alternatively, at these lines of your code:

    targetsTrain=torch.from_numpy(targets_train)
    targetsTest=torch.from_numpy(targets_test)

you have to change the code to:

    targetsTrain=torch.from_numpy(targets_train).type(torch.LongTensor) # data type is long
    targetsTest=torch.from_numpy(targets_test).type(torch.LongTensor) # data type is long

Then it works correctly.
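For completeness, here is a minimal, self-contained sketch (not from the original post; the label values are made up for illustration) that reproduces the dtype requirement of nn.CrossEntropyLoss and shows both fixes:

    import numpy as np
    import torch
    import torch.nn as nn

    criterion = nn.CrossEntropyLoss()
    logits = torch.randn(4, 10)                            # batch of 4 samples, 10 classes
    targets_np = np.array([3, 1, 0, 7], dtype=np.float32)  # float labels, as produced by reading the csv with dtype=np.float32

    float_targets = torch.from_numpy(targets_np)
    # criterion(logits, float_targets)                     # RuntimeError: expected scalar type Long but found Float

    # Fix 1: cast to long when computing the loss
    print(criterion(logits, float_targets.long()).item())

    # Fix 2: cast to long when building the target tensor
    long_targets = torch.from_numpy(targets_np).type(torch.LongTensor)
    print(criterion(logits, long_targets).item())

Both approaches produce a LongTensor of class indices, which is what the loss expects for 1-D targets.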

