I. The underlying algorithm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Start with ten sample points, each representing a glass of wine
raw_data = {'color_depth': [14.23, 13.2, 13.16, 14.37, 13.24, 12.07, 12.43, 11.79, 12.37, 12.04],
            'alcohol':     [5.64, 4.38, 5.68, 4.80, 4.32, 2.76, 3.94, 3.0, 2.12, 2.6],
            'cultivar':    [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]}
# 0 = Pinot Noir, 1 = Cabernet Sauvignon
wine_data = pd.DataFrame(raw_data)
wine_data
X = np.array(wine_data.iloc[:,0:2])
X
y = np.array(wine_data.iloc[:,-1])
y
new_data = np.array([12.8, 4.1])  # the point we want to classify
# Step 1: compute distances
from math import sqrt
distance = [sqrt(np.sum((x - new_data)**2)) for x in X]  # Euclidean distance from each sample to new_data
distance
# Step 2: find the neighbors
sort_dist = np.argsort(distance)  # argsort returns the indices that would sort the distances
sort_dist
k = 3  # take the 3 nearest samples
topK = [y[i] for i in sort_dist[:k]]
topK
# Step 3: classify by majority vote
from collections import Counter  # Counter tallies the labels and returns a dict-like mapping
votes = Counter(topK)
votes
predict = votes.most_common(1)[0][0]  # most_common(1) gives the (label, count) pair with the highest count
predict
# Package the three steps into a function
def KNN(inx, dataset, k):
    # Step 1: Euclidean distance from every sample to the query point inx
    distance = [sqrt(np.sum((x - inx)**2)) for x in np.array(dataset.iloc[:, 0:2])]
    # Step 2: indices of the samples sorted by distance
    sort_dist = np.argsort(distance)
    # labels of the k nearest neighbors
    topK = [dataset.iloc[i, -1] for i in sort_dist[:k]]
    # Step 3: majority vote among the k labels
    return Counter(topK).most_common(1)[0][0]
KNN(new_data,wine_data,3)
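The list comprehension above computes one distance per Python-level iteration; NumPy can compute all of them at once by broadcasting. A minimal vectorized sketch (the name KNN_vec is hypothetical, same inputs as KNN):

def KNN_vec(inx, dataset, k):
    # broadcast-subtract the query from every row, then reduce along the feature axis
    X = np.array(dataset.iloc[:, 0:2], dtype=float)
    y = np.array(dataset.iloc[:, -1])
    dist = np.sqrt(((X - inx) ** 2).sum(axis=1))  # Euclidean distance to every sample
    topk = y[np.argsort(dist)[:k]]                # labels of the k nearest samples
    labels, counts = np.unique(topk, return_counts=True)
    return labels[np.argmax(counts)]              # majority vote
KNN_vec(new_data, wine_data, 3)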
II. sklearn implementation
from sklearn.neighbors import KNeighborsClassifier  # the k-nearest-neighbors classifier
# Instantiate (i.e. bind the model object to a variable)
clf = KNeighborsClassifier(n_neighbors=3)
# Fit the model
clf = clf.fit(X,y)
# Predict: returns the predicted label(s)
result = clf.predict([[12.8,4.1]])
result
# Evaluation: score returns the prediction accuracy (on a single labeled point it is either 0.0 or 1.0)
score = clf.score([[12.8,4.1]],[0])
score
# Return the predicted class probabilities
clf.predict_proba([[12.8,4.1]])
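With uniform weights, predict_proba is simply the fraction of the k nearest neighbors that fall in each class, so with n_neighbors=3 every value is a multiple of 1/3. A quick sanity check with the same clf as above:

proba = clf.predict_proba([[12.8, 4.1]])
proba.sum(axis=1)  # each row of predict_proba sums to 1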
III. Optimizing KNN performance
Using the breast cancer binary-classification dataset, predict whether a tumor is benign or malignant.
1. Import the dataset
from sklearn.datasets import load_breast_cancer
2. Explore the dataset
data = load_breast_cancer()  # binary classification dataset; in sklearn's encoding, target 0 = malignant and 1 = benign (see data.target_names)
data
X = data.data
X.shape
y = data.target
y.shape
data.feature_names
3. Split into training and test sets
from sklearn.model_selection import train_test_split  # utility for splitting the dataset
Xtrain,Xtest,Ytrain,Ytest = train_test_split(X,y,test_size=0.2)  # tuple unpacking; 20% held out for testing
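Note that train_test_split shuffles at random, so the accuracy below changes from run to run. A sketch that pins the shuffle with random_state (the value 420 is arbitrary) for reproducible results:

Xtrain,Xtest,Ytrain,Ytest = train_test_split(X,y,test_size=0.2,random_state=420)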
Xtrain.shape
# Build the model
clf = KNeighborsClassifier(n_neighbors=5)
clf = clf.fit(Xtrain,Ytrain)
score = clf.score(Xtest,Ytest)
score  # accuracy on the test set
4. Cross-validation
from sklearn.model_selection import cross_val_score as CVS
clf = KNeighborsClassifier(n_neighbors=5)
cvresult = CVS(clf,Xtrain,Ytrain,cv=5)  # cross-validation scores; cv is the number of folds (default 5 since scikit-learn 0.22, 3 before that)
cvresult
cvresult.mean()  # average performance across the folds
cvresult.var()   # variance across the folds: how stable the model is
score=[]
var_ = []
krange = range(1,21)
for i in krange:  # learning curve: try every K from 1 to 20
    clf = KNeighborsClassifier(n_neighbors=i)   # instantiate
    cvresult = CVS(clf, Xtrain, Ytrain, cv=5)   # cross-validate: 5 scores per K
    score.append(cvresult.mean())               # mean of the 5 fold scores
    var_.append(cvresult.var())
plt.plot(krange, score, color='k')  # mean cross-validation accuracy
plt.plot(krange, np.array(score)+np.array(var_)*5, c='red', linestyle='--')  # upper band: mean + 5*variance
plt.plot(krange, np.array(score)-np.array(var_)*5, c='red', linestyle='--')  # lower band: mean - 5*variance
Reading the plot:
Look for the highest accuracy first. If the gap between the two dashed lines is small (generalization barely fluctuates, so the model is stable), you can simply pick the K at the highest point of the curve.
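Rather than reading the peak off the plot by eye, the best K can be taken straight from the score list built in the loop above:

best_k = krange[np.argmax(score)]  # K with the highest mean cross-validation accuracy
best_k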
5. Normalization improves prediction accuracy
from sklearn.preprocessing import MinMaxScaler as mms  # min-max normalization
X_ = mms().fit_transform(X)  # fit on the full dataset (this leaks information into the cross-validation below; see the Pipeline sketch at the end of this subsection)
score=[]
var_ = []
krange = range(1,21)
for i in krange:
    clf = KNeighborsClassifier(n_neighbors=i)
    cvresult = CVS(clf, X_, y, cv=5)
    score.append(cvresult.mean())
    var_.append(cvresult.var())
plt.plot(krange,score,color='k')
plt.plot(krange,np.array(score)+np.array(var_),c='red',linestyle='--')
plt.plot(krange,np.array(score)-np.array(var_),c='red',linestyle='--')
Model performance improves markedly.
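One caveat with the cell above: MinMaxScaler was fit on the full dataset before cross-validation, so every validation fold has already influenced the scaling. A leakage-free sketch wraps the scaler and the classifier in an sklearn Pipeline, which re-fits the scaler on the training folds only:

from sklearn.pipeline import make_pipeline
pipe = make_pipeline(mms(), KNeighborsClassifier(n_neighbors=5))
CVS(pipe, X, y, cv=5).mean()  # the scaler is fit inside each fold, so no leakage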
6. Weighted KNN
Weighting the neighbors via the weights parameter reduces the influence of noise.
clf = KNeighborsClassifier(n_neighbors=3,
                           weights='distance').fit(Xtrain, Ytrain)  # neighbors vote with weight 1/distance
score = clf.score(Xtest, Ytest)
score
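Under weights='distance', each of the k neighbors votes with weight 1/d instead of one equal vote, so closer neighbors dominate. A minimal hand-rolled sketch of that vote (hypothetical helper; nonzero Euclidean distances assumed):

from collections import Counter
def weighted_vote(distances, labels):
    votes = Counter()
    for d, label in zip(distances, labels):
        votes[label] += 1.0 / d  # inverse-distance weight; assumes d > 0
    return votes.most_common(1)[0][0]
weighted_vote([0.5, 2.0, 3.0], [1, 0, 0])  # one close neighbor outvotes two far ones -> 1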
Source: https://blog.csdn.net/m0_45384958/article/details/100807450