
Why does KNeighborsClassifier always predict the same number?

Why does knn always predict the same number, and how can I fix this? The dataset is here.

Code:

import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import scipy.io   
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset, DataLoader
from sklearn import preprocessing
import torch
from sklearn.model_selection import KFold

def load_mat_data(path):
    mat = scipy.io.loadmat(path)
    x, y = mat['data'], mat['class']
    x = x.astype('float32')
    # standardize values
    standardizer = preprocessing.StandardScaler()
    x = standardizer.fit_transform(x)
    return x, standardizer, y

def numpyToTensor(x):
    x_train = torch.from_numpy(x)
    return x_train

class DataBuilder(Dataset):
    def __init__(self, path):
        self.x, self.standardizer, self.y = load_mat_data(path)
        self.x = numpyToTensor(self.x)
        self.len=self.x.shape[0]
        self.y = numpyToTensor(self.y)
    def __getitem__(self,index):      
        return (self.x[index], self.y[index])
    def __len__(self):
        return self.len

datasets = ['/home/katerina/Desktop/datasets/GSE75110.mat']

for DATA_PATH in datasets:

    print(DATA_PATH)
    data_set=DataBuilder(DATA_PATH)

    pred_rpknn = [0] * len(data_set.y)
    kf = KFold(n_splits=10, shuffle = True, random_state=7)

    for train_index, test_index in kf.split(data_set.x):
        #Create KNN Classifier
        knn = KNeighborsClassifier(n_neighbors=5)
        #print("TRAIN:", train_index, "TEST:", test_index)
        x_train, x_test = data_set.x[train_index], data_set.x[test_index]
        y_train, y_test = data_set.y[train_index], data_set.y[test_index]
        #Train the model using the training sets
        y1_train = y_train.ravel()
        knn.fit(x_train, y1_train)
        #Predict the response for test dataset
        y_pred = knn.predict(x_test)
        #print(y_pred)
        # Model Accuracy, how often is the classifier correct?
        print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
        c = 0
        for idx in test_index:
            pred_rpknn[idx] = y_pred[c]
            c +=1
    print("Accuracy:",metrics.accuracy_score(data_set.y, pred_rpknn))
    print(pred_rpknn, data_set.y.reshape(1,-1))

Output:

/home/katerina/Desktop/datasets/GSE75110.mat
Accuracy: 0.2857142857142857
Accuracy: 0.38095238095238093
Accuracy: 0.14285714285714285
Accuracy: 0.4
Accuracy: 0.3
Accuracy: 0.25
Accuracy: 0.3
Accuracy: 0.6
Accuracy: 0.25
Accuracy: 0.45
Accuracy: 0.33497536945812806
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]

I am trying to combine knn with k-fold cross-validation so that the whole dataset gets tested across 10 folds. The problem is that knn always predicts an array of 3s for every fold. The classes I want to predict are these:

tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]])

TL;DR
It's the StandardScaler; change it to simple normalization.
For example:

from sklearn import preprocessing

...

x = preprocessing.normalize(x)

Explanation:

When you use StandardScaler, it does the following:

 The standard score of a sample `x` is calculated as: z = (x - u) / s where `u` is the mean of the training samples or zero if `with_mean=False`, and `s` is the standard deviation of the training samples or one if `with_std=False`.

This is applied per feature, while you actually do want those feature values to help KNN determine which vectors are closer to one another.

With normalization, each vector is normalized on its own, so it does not interfere with, and may even help, KNN's ability to distinguish between vectors.

Using StandardScaler with KNN may therefore actually hurt your predictions; it is better suited to other kinds of data.
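To see the difference concretely, here is a minimal sketch with made-up numbers (not the GSE75110 data): StandardScaler rescales each feature (column) across all samples, while preprocessing.normalize rescales each sample (row) to unit length, which is what changes the distances KNN compares.

import numpy as np
from sklearn.preprocessing import StandardScaler, normalize

# Tiny made-up matrix: 3 samples x 2 features, purely for illustration
X = np.array([[1.0, 100.0],
              [2.0, 200.0],
              [3.0, 300.0]], dtype=np.float32)

# StandardScaler works column-wise: each feature gets zero mean and unit variance
print(StandardScaler().fit_transform(X))

# normalize works row-wise: each sample is scaled to unit (L2) norm
print(normalize(X))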

import scipy.io
from torch.utils.data import Dataset
from sklearn import preprocessing
import torch
import numpy as np
from sklearn.model_selection import KFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics

def load_mat_data(path):
    mat = scipy.io.loadmat(path)
    x, y = mat['data'], mat['class']
    x = x.astype('float32')
    # normalize each sample (row) to unit norm
    x = preprocessing.normalize(x)
    return x, y

def numpyToTensor(x):
    x_train = torch.from_numpy(x)
    return x_train

class DataBuilder(Dataset):
    def __init__(self, path):
        self.x, self.y = load_mat_data(path)
        self.x = numpyToTensor(self.x)
        self.len=self.x.shape[0]
        self.y = numpyToTensor(self.y)
    def __getitem__(self,index):
        return (self.x[index], self.y[index])
    def __len__(self):
        return self.len

datasets = ['/home/katerina/Desktop/datasets/GSE75110.mat']

for DATA_PATH in datasets:

    print(DATA_PATH)
    data_set=DataBuilder(DATA_PATH)

    pred_rpknn = [0] * len(data_set.y)
    kf = KFold(n_splits=10, shuffle = True, random_state=7)

    for train_index, test_index in kf.split(data_set.x):
        #Create KNN Classifier
        knn = KNeighborsClassifier(n_neighbors=5)
        #print("TRAIN:", train_index, "TEST:", test_index)
        x_train, x_test = data_set.x[train_index], data_set.x[test_index]
        y_train, y_test = data_set.y[train_index], data_set.y[test_index]
        #Train the model using the training sets
        y1_train = y_train.view(-1)
        knn.fit(x_train, y1_train)
        #Predict the response for test dataset
        y_pred = knn.predict(x_test)
        #print(y_pred)
        # Model Accuracy, how often is the classifier correct?
        print("Accuracy in loop:", metrics.accuracy_score(y_test, y_pred))
        c = 0
        for idx in test_index:
            pred_rpknn[idx] = y_pred[c]
            c +=1
    print("Accuracy:",metrics.accuracy_score(data_set.y, pred_rpknn))
    print(pred_rpknn, data_set.y.reshape(1,-1))


Accuracy in loop: 1.0
Accuracy in loop: 0.8571428571428571
Accuracy in loop: 0.8571428571428571
Accuracy in loop: 1.0
Accuracy in loop: 0.9
Accuracy in loop: 0.9
Accuracy in loop: 0.95
Accuracy in loop: 1.0
Accuracy in loop: 0.9
Accuracy in loop: 1.0
Accuracy: 0.9359605911330049
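
As a side note, here is a sketch (not part of the original answer) of a more compact way to run the same 10-fold, whole-dataset evaluation: scikit-learn's cross_val_predict handles the fold bookkeeping, and putting the row-wise normalization in a Pipeline means it is fitted inside each fold. It assumes the raw arrays are loaded from the same .mat file before any scaling.

import scipy.io
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import KFold, cross_val_predict
from sklearn import metrics

# Load the raw data and flatten the labels to 1-D
mat = scipy.io.loadmat('/home/katerina/Desktop/datasets/GSE75110.mat')
x, y = mat['data'].astype('float32'), mat['class'].ravel()

# Normalizer is the transformer form of preprocessing.normalize (row-wise L2 norm)
model = make_pipeline(Normalizer(), KNeighborsClassifier(n_neighbors=5))
kf = KFold(n_splits=10, shuffle=True, random_state=7)

# cross_val_predict gathers the out-of-fold prediction for every sample
y_pred = cross_val_predict(model, x, y, cv=kf)
print("Accuracy:", metrics.accuracy_score(y, y_pred))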
