
Linear regression using PyTorch

I have a classification problem. I am using PyTorch. My input is a sequence of length 341 and the output is one of three classes {0, 1, 2}. I want to train a linear regression model using PyTorch, so I created the following class, but during training the loss values start as ordinary numbers, then become inf, then NaN. I do not know how to fix that. I also tried initializing the weights of the linear model, but the result is the same. Any suggestions?

import torch
from torch import nn
from torch.autograd import Variable
from torch.optim import SGD

class regression(nn.Module):
    def __init__(self, input_dim):
        super().__init__()
        self.input_dim = input_dim
        # One layer
        self.linear = nn.Linear(input_dim, 1)

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred

criterion = torch.nn.MSELoss()

def fit(model, data_loader, optim, epochs):
    for epoch in range(epochs):

        for i, (X, y) in enumerate(data_loader):
            X = X.float()
            y = y.unsqueeze(1).float()
            X = Variable(X, requires_grad=True)
            y = Variable(y, requires_grad=True)
            # Make a prediction for the input X
            pred = model(X)
            #loss = (y-pred).pow(2).mean()
            loss = criterion(y, pred)
            optim.zero_grad()
            loss.backward()
            optim.step()
            print(loss)
            print(type(loss))
        # Give some feedback after each 5th pass through the data
        if epoch % 5 == 0:
            print("Epoch", epoch, f"loss: {loss}")
    return None
regnet = regression(input_dim=341)
optim = SGD(regnet.parameters(), lr=0.01)
fit(regnet, data_loader, optim=optim, epochs=5)
pred = regnet(torch.Tensor(test_set.data_info).float())
pred = pred.detach().numpy()

Because of my reputation score I can't comment, so if I were you, I would build it like this. I think there is something wrong with the way you define your Module:

class regression(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(regression, self).__init__()
        # single linear layer
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        return self.linear(x)

# define the model
input_dim = 341
output_dim = 3
model = regression(input_dim, output_dim)

# Mean squared error
mse = nn.MSELoss()

# Optimization
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# train the model
loss_list = []
iteration_number = X  # placeholder: set this to your number of training iterations
for iteration in range(iteration_number):
    # reset the gradients
    optimizer.zero_grad()

    # forward pass to get outputs
    results = model(input_datas_tensor)  # placeholder: your input tensor

    # calculate the loss
    loss = mse(results, outputs_datas_tensor)  # placeholder: your target tensor

    # backward propagation
    loss.backward()
    # update the parameters
    optimizer.step()
    # store the loss
    loss_list.append(loss.item())

    if iteration % 5 == 0:
        print("epoch {}, loss {}".format(iteration, loss.item()))
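To make this sketch concrete, here is one way the placeholders above might be filled in with randomly generated stand-in data. The shapes and the one-hot float targets are assumptions on my part: nn.MSELoss needs targets with the same shape as the model output, and the regression class is the one defined above.

import torch
from torch import nn

# Stand-in data (assumed shapes): 100 sequences of length 341, and
# one-hot float targets shaped like the model output (100, 3).
input_datas_tensor = torch.rand(100, 341)
labels = torch.randint(0, 3, (100,))
outputs_datas_tensor = torch.nn.functional.one_hot(labels, num_classes=3).float()

model = regression(input_dim=341, output_dim=3)  # class defined above
mse = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

for iteration in range(20):
    optimizer.zero_grad()
    loss = mse(model(input_datas_tensor), outputs_datas_tensor)
    loss.backward()
    optimizer.step()
    if iteration % 5 == 0:
        print("iteration {}, loss {}".format(iteration, loss.item()))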

I would additionally suggest replacing MSE with CrossEntropyLoss, as it is better suited for multi-class classification problems.

import random
import torch
from torch import nn, optim
from matplotlib import pyplot as plt

# Generate random dataset with your shape to test
# Replace this with your own dataset
data = []
for label in [0, 1, 2]:
    for i in range(1000):
        data.append((torch.rand(341), label))

# train test split
random.shuffle(data)
train, val = data[:1500], data[1500:]    


def run_gradient_descent(model, data_train, data_val, batch_size=64, learning_rate=0.01, weight_decay=0, num_epochs=10):
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    iters, losses = [], []
    iters_sub, train_acc, val_acc = [], [], []
    train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, shuffle=True)

    # training
    n = 0 # the number of iterations
    for epoch in range(num_epochs):
        for xs, ts in iter(train_loader):
            if len(ts) != batch_size:
                continue
            zs = model(xs)
            loss = criterion(zs, ts) # compute the total loss
            loss.backward() # compute updates for each parameter
            optimizer.step() # make the updates for each parameter
            optimizer.zero_grad() # a clean up step for PyTorch
            # save the current training information
            iters.append(n)
            losses.append(float(loss)) # CrossEntropyLoss already averages over the batch
            if n % 10 == 0:
                iters_sub.append(n)
                train_acc.append(get_accuracy(model, data_train))
                val_acc.append(get_accuracy(model, data_val))
            # increment the iteration number
            n += 1

    # plotting
    plt.title("Training Curve (batch_size={}, lr={})".format(batch_size, learning_rate))
    plt.plot(iters, losses, label="Train")
    plt.xlabel("Iterations")
    plt.ylabel("Loss")
    plt.show()
    plt.title("Training Curve (batch_size={}, lr={})".format(batch_size, learning_rate))
    plt.plot(iters_sub, train_acc, label="Train")
    plt.plot(iters_sub, val_acc, label="Validation")
    plt.xlabel("Iterations")
    plt.ylabel("Accuracy")
    plt.legend(loc='best')
    plt.show()
    return model


def get_accuracy(model, data):
    loader = torch.utils.data.DataLoader(data, batch_size=500)
    correct, total = 0, 0
    for xs, ts in loader:
        zs = model(xs)
        pred = zs.max(1, keepdim=True)[1] # get the index of the max logit
        correct += pred.eq(ts.view_as(pred)).sum().item()
        total += int(ts.shape[0])
    return correct / total


class MyRegression(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(MyRegression, self).__init__()
        # One layer
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        return self.linear(x)


model = MyRegression(341, 3)
run_gradient_descent(model, train, val, batch_size=64, learning_rate=0.01, num_epochs=10)
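After training, class predictions can be read off by taking the argmax over the three output logits. A minimal inference sketch; the random test batch here is just a stand-in for real test data:

# Hypothetical test batch: 10 random sequences of length 341.
test_x = torch.rand(10, 341)

with torch.no_grad():
    logits = model(test_x)       # shape (10, 3)
    pred = logits.argmax(dim=1)  # predicted class in {0, 1, 2}
print(pred)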
