
pytorch multi-class lstm predicting all one class on testing

I'm working on a project (my first AI project) and I've hit a bit of a wall. When performing testing on my trained classifier, it predicts that everything belongs to class 1. Now the data set is heavily skewed towards class 1; however, I have implemented weights to compensate for this. I'm just concerned that I've coded something wrong or missed something. Please let me know if you see anything.

Here is the setup and training:

batchSize = 50

trainingLoad = DataLoader(trainingData, shuffle=True, batch_size=batchSize, drop_last=True)
validationLoad = DataLoader(validationData, shuffle=True, batch_size=batchSize, drop_last=True)
testingLoad = DataLoader(testingData, shuffle=True, batch_size=batchSize, drop_last=True)

vocabularySize = len(wordToNoDict)
output = 3
embedding = 400
hiddenDimension = 524
layers = 4

classifierModel = Classifier.HateSpeechDetector(device, vocabularySize, output, embedding, hiddenDimension, layers)
classifierModel.to(device)

# Raw string so the backslashes in the Windows path are kept literally
path = r'Program\data\state_dict2.pt'

# Per-class weights relative to the smallest class (1203 / 15389 / 3407 examples)
weights = torch.tensor([1203/1203, 1203/15389, 1203/3407])
criterion = nn.CrossEntropyLoss(weight=weights)

trainClassifier(classifierModel, trainingLoad, validationLoad, device, batchSize, criterion, path)

test(classifierModel, path, testingLoad, batchSize, device, criterion)

def trainClassifier(model, trainingData, validationData, device, batchSize, criterion, path):
    epochs = 5
    counter = 0
    testWithValiEvery = 10
    clip = 5
    valid_loss_min = np.Inf

    lr = 0.0001
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    model.train()

    for i in range(epochs):

        h = model.init_hidden(batchSize, device)
        for inputs, labels in trainingData:
            h = tuple([e.data for e in h])
            inputs, labels = inputs.to(device), labels.to(device)
            model.zero_grad()
            output, h = model(inputs, h)
            loss = criterion(output.squeeze(), labels.long())
            loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), clip)
            optimizer.step()
            counter += 1
            print(counter)

            if counter % testWithValiEvery == 0:
                print("validating")
                val_h = model.init_hidden(batchSize, device)
                val_losses = []
                model.eval()
                for inp, lab in validationData:
                    val_h = tuple([each.data for each in val_h])
                    inp, lab = inp.to(device), lab.to(device)

                    out, val_h = model(inp, val_h)

                    val_loss = criterion(out.squeeze(), lab.long())
                    val_losses.append(val_loss.item())

                model.train()
                print("Epoch: {}/{}...".format(i+1, epochs),
                      "Step: {}...".format(counter),
                      "Loss: {:.6f}...".format(loss.item()),
                      "Val Loss: {:.6f}".format(np.mean(val_losses)))
                if np.mean(val_losses) <= valid_loss_min:
                    torch.save(model.state_dict(), path)
                    print('Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...'.format(valid_loss_min, np.mean(val_losses)))
                    print('model saved')
                    valid_loss_min = np.mean(val_losses)

Here is the classifier. There's a fair amount of random commented-out code in it from where I've been messing around with bits:

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as op
import torchvision
from torch.utils.data import TensorDataset, DataLoader
from torchvision import transforms, datasets


class HateSpeechDetector(nn.Module):
    def __init__(self, device, vocabularySize, output, embedding, hidden, layers, dropProb=0.5):
        super(HateSpeechDetector, self).__init__()
        #Number of outputs (Classes/Categories)
        self.output = output
        #Number of layers in the LSTM
        self.numLayers = layers
        #Number of hidden neurons in each LSTM layer
        self.hiddenDimensions = hidden
        #Device being used for by model (CPU or GPU)
        self.device = device

        #Embedding layer finds correlations in words by converting word integers into vectors
        self.embedding = nn.Embedding(vocabularySize, embedding)
        #LSTM stores important data in memory, using it to help with future predictions
        self.lstm = nn.LSTM(embedding, hidden, layers, dropout=dropProb, batch_first=True)
        #Dropout is used to randomly drop nodes. This helps to prevent overfitting of the model during training
        self.dropout = nn.Dropout(dropProb)

        #Establishing six fully connected layers and a softmax output
        self.fc = nn.Linear(hidden, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.fc3 = nn.Linear(hidden, hidden)
        self.fc4 = nn.Linear(hidden, hidden)
        self.fc5 = nn.Linear(hidden, hidden)
        self.fc6 = nn.Linear(hidden, output)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, x, hidden):
        batchSize = x.size(0)

        x = x.long()

        embeds = self.embedding(x)

        lstm_out, hidden = self.lstm(embeds, hidden)

        #Tensor changes here from 250,33,524 to 8250,524
        # lstm_out = lstm_out.contiguous().view(-1,self.hiddenDimensions)

        out = self.dropout(lstm_out)
        out = self.fc(out)
        out = self.fc2(out)
        out = self.fc3(out)
        out = self.fc4(out)
        out = self.fc5(out)
        out = self.fc6(out)

        out = self.softmax(out) 

        out = out[:,-1,:]

        # myTensor = torch.Tensor([0,0,0])
        # newOut = torch.zeros(batchSize, self.output)
        # count = 0
        # row = 0

        # for tensor in out:
        #     if(count == 33):
        #         newOut[row] = myTensor/33
        #         myTensor = torch.Tensor([0,0,0])
        #         row += 1
        #         count = 0
        #     myTensor += tensor
        #     count += 1
        return out, hidden

    def init_hidden(self, batchSize, device):
        weight = next(self.parameters()).data

        hidden = (weight.new(self.numLayers, batchSize, self.hiddenDimensions).zero_().to(device),
                  weight.new(self.numLayers, batchSize, self.hiddenDimensions).zero_().to(device))

        return hidden

You've already added weights to your cross-entropy loss, and the weights are biased towards the first class already ([1.0, 0.08, 0.35]).

Having a higher weight for a certain class means that the model will be more heavily penalized for getting that class wrong, and it's possible for the model to learn to just predict everything as the class with the highest weight. Usually you don't need to manually assign weights.
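
As a minimal sketch of that effect (the logits and labels below are made-up toy values, not the poster's data), the weight argument to nn.CrossEntropyLoss rescales the penalty per class:

import torch
import torch.nn as nn

# Toy logits for a batch of 3 samples over 3 classes (hypothetical values)
logits = torch.tensor([[2.0, 0.5, 0.1],
                       [0.2, 1.5, 0.3],
                       [0.1, 0.4, 1.8]])
labels = torch.tensor([0, 1, 2])

unweighted = nn.CrossEntropyLoss()
weighted = nn.CrossEntropyLoss(weight=torch.tensor([1.0, 0.08, 0.35]))

# With these weights, a mistake on class 0 costs roughly 12x more than a
# mistake on class 1, so predicting class 0 everywhere can become the
# cheapest way for the model to lower the loss
print(unweighted(logits, labels).item())
print(weighted(logits, labels).item())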

Also, check your data for label imbalance, i.e. whether you have many more training examples of the first class. An imbalanced training set has a similar effect to setting different weights on the loss.
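
A quick way to run that check (a sketch reusing the trainingLoad from the question; since drop_last=True, the tally slightly undercounts the full data set):

from collections import Counter

# Tally the labels that the training loader actually yields
counts = Counter()
for _, labels in trainingLoad:
    counts.update(labels.tolist())
print(counts)  # the weights above imply roughly {0: 1203, 1: 15389, 2: 3407}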
