KNN and Linear Regression
K-Nearest Neighbors (KNN) is a simple yet effective method for both classification and regression. To make a prediction, it computes the distance from a query sample to every training sample, finds the k nearest neighbors, and then takes a majority vote over their class labels (classification) or averages their target values (regression).
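To make the mechanism concrete, here is a minimal from-scratch sketch of KNN; the function name knn_predict and the toy data are illustrative only, not part of the scikit-learn examples that follow.

import numpy as np

def knn_predict(X_train, y_train, x, k=5):
    # Euclidean distance from the query point to every training sample
    dists = np.linalg.norm(X_train - x, axis=1)
    # Indices of the k nearest neighbors
    nearest = np.argsort(dists)[:k]
    # Classification: majority vote among the neighbors' labels
    # (for regression, return y_train[nearest].mean() instead)
    labels, counts = np.unique(y_train[nearest], return_counts=True)
    return labels[np.argmax(counts)]

# Toy usage
X_train = np.array([[0, 0], [1, 0], [0, 1], [5, 5], [6, 5], [5, 6]])
y_train = np.array([0, 0, 0, 1, 1, 1])
print(knn_predict(X_train, y_train, np.array([0.5, 0.5]), k=3))  # -> 0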
1.1 KNN for Classification
1.1.1 Binary Classification
In a classification task, KNN computes the distance from each sample to every sample in the training set, finds its nearest neighbors, and predicts the majority class among them. The following code trains a KNN classifier:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.neighbors import KNeighborsClassifier

# Generate a dataset with 200 samples in 2 classes
data = make_blobs(n_samples=200, centers=2, random_state=8)
X, y = data

# Train the model
clf = KNeighborsClassifier()
clf.fit(X, y)

# Evaluate the classifier on a grid to visualize the decision boundary
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02),
                     np.arange(y_min, y_max, 0.02))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

# Plot the decision regions and the data
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Pastel1)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.spring, edgecolors='k')
plt.title("KNN Classification")
plt.show()

# Classify a new data point
print("Prediction for the new point:", clf.predict([[6.5, 4.3]]))

1.1.2 Multi-class Classification
KNN works just as well for multi-class problems. The following code trains a multi-class KNN classifier:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.neighbors import KNeighborsClassifier

# Generate a dataset with 500 samples in 5 classes
data = make_blobs(n_samples=500, centers=5, random_state=8)
X, y = data

# Train the model
clf = KNeighborsClassifier()
clf.fit(X, y)

# Evaluate the classifier on a grid to visualize the decision boundary
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02),
                     np.arange(y_min, y_max, 0.02))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

# Plot the decision regions and the data
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Pastel1)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.spring, edgecolors='k')
plt.title("KNN Multi-class Classification")
plt.show()

# Evaluate on the training data
print("Model accuracy:", clf.score(X, y))

1.2 Regression
KNN can also be used for regression, predicting the average of the nearest neighbors' target values. The following code fits a KNN regressor:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_regression
from sklearn.neighbors import KNeighborsRegressor

# Generate a noisy one-feature regression dataset
X, y = make_regression(n_features=1, n_informative=1, noise=50, random_state=8)

# Train the model
reg = KNeighborsRegressor()
reg.fit(X, y)

# Plot the data and the fitted curve
plt.scatter(X, y, c='orange', edgecolors='k')
z = np.linspace(-3, 3, 200).reshape(-1, 1)
plt.plot(z, reg.predict(z), c='k', linewidth=3)
plt.title('KNN Regressor')
plt.show()

# Evaluate the model (R^2 on the training data)
print("Model score:", reg.score(X, y))

Linear Regression
Linear regression is a statistical method that models a linear relationship between a dependent variable and one or more independent variables. (The original text mentioned random forests here, but the example below is ordinary linear regression.) The sketch below shows the least-squares solution computed directly with NumPy; the full scikit-learn example follows.
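As a rough sketch of what LinearRegression computes under the hood, the ordinary least-squares fit that minimizes ||Xw - y||^2 can be obtained with NumPy's least-squares solver; this is illustrative only.

import numpy as np
from sklearn.datasets import load_diabetes

X, y = load_diabetes(return_X_y=True)
# Append a column of ones so the intercept is estimated jointly
Xb = np.hstack([X, np.ones((X.shape[0], 1))])
# Least-squares solution of min ||Xb w - y||^2
w, *_ = np.linalg.lstsq(Xb, y, rcond=None)
print("coefficients:", w[:-1])
print("intercept:", w[-1])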
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split

# Load the diabetes dataset
data = load_diabetes()
X = data['data']
y = data['target']

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Fit a linear regression model
model = LinearRegression().fit(X_train, y_train)

# Inspect and evaluate the model
print("Coefficients:", model.coef_)
print("Intercept:", model.intercept_)
print("Training score:", model.score(X_train, y_train))
print("Test score:", model.score(X_test, y_test))

# Plot true vs. predicted values on the test set
plt.figure()
plt.scatter(np.arange(len(y_test)), y_test, c='b', s=80)
plt.plot(np.arange(len(y_test)), model.predict(X_test), c='r')
plt.title('Linear Regression')
plt.show()

4.1 Regularization
To prevent overfitting, regularization can be applied. Ridge regression is an L2-regularized linear model: it adds a penalty on the squared magnitude of the coefficients to the least-squares objective, shrinking them toward zero. A closed-form sketch of the estimator appears below, followed by the full scikit-learn example that tunes the penalty strength alpha with grid search.
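Concretely, ridge minimizes ||y - Xw||^2 + alpha * ||w||^2, which has the closed-form solution w = (X^T X + alpha * I)^(-1) X^T y. A minimal NumPy sketch, assuming centered data so the intercept can be ignored:

import numpy as np

def ridge_fit(X, y, alpha=1.0):
    # Closed-form ridge solution w = (X^T X + alpha * I)^(-1) X^T y
    # (assumes X and y are centered, so no intercept term is needed)
    n_features = X.shape[1]
    A = X.T @ X + alpha * np.eye(n_features)
    return np.linalg.solve(A, X.T @ y)

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + rng.randn(50) * 0.1
print(ridge_fit(X - X.mean(axis=0), y - y.mean(), alpha=0.1))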
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, GridSearchCV

# Load the diabetes dataset
data = load_diabetes()
X = data['data']
y = data['target']

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Tune the alpha (regularization strength) parameter with grid search
ridge = Ridge()
params = {'alpha': np.logspace(-3, 2, 10)}
ridge_cv = GridSearchCV(ridge, params, cv=5)
ridge_cv.fit(X_train, y_train)

# Evaluate the model
print("Ridge coefficients:", ridge_cv.best_estimator_.coef_)
print("Ridge intercept:", ridge_cv.best_estimator_.intercept_)
print("Training score:", ridge_cv.score(X_train, y_train))
print("Test score:", ridge_cv.score(X_test, y_test))

# Plot true vs. predicted values on the test set
plt.figure()
plt.scatter(np.arange(len(y_test)), y_test, c='b', s=80)
plt.plot(np.arange(len(y_test)), ridge_cv.best_estimator_.predict(X_test), c='r')
plt.title('Ridge Regression')
plt.show()

Lasso regression is an L1-regularized linear model; because the L1 penalty can drive coefficients exactly to zero, it also performs feature selection. A sketch of that mechanism appears below, followed by the full scikit-learn example.
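Unlike ridge, lasso has no closed-form solution; solvers such as coordinate descent repeatedly apply a soft-thresholding operator, which is what clips small coefficients to exactly zero. A minimal sketch of that operator:

import numpy as np

def soft_threshold(z, t):
    # Soft-thresholding: shrinks z toward zero and sets values with
    # magnitude below t exactly to 0, which is how the L1 penalty
    # produces sparse coefficient vectors
    return np.sign(z) * np.maximum(np.abs(z) - t, 0.0)

print(soft_threshold(np.array([-3.0, -0.5, 0.2, 2.0]), 1.0))  # [-2. -0.  0.  1.]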
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Lasso
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler

# Load the diabetes dataset
data = load_diabetes()
X = data['data']
y = data['target']

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Standardize the features (lasso is sensitive to feature scale)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Tune the alpha parameter with grid search
lasso = Lasso()
params = {'alpha': np.logspace(-3, 2, 10)}
lasso_cv = GridSearchCV(lasso, params, cv=5)
lasso_cv.fit(X_train_scaled, y_train)

# Evaluate the model
print("Number of non-zero lasso coefficients:", np.sum(lasso_cv.best_estimator_.coef_ != 0))
print("Lasso intercept:", lasso_cv.best_estimator_.intercept_)
print("Training score:", lasso_cv.score(X_train_scaled, y_train))
print("Test score:", lasso_cv.score(X_test_scaled, y_test))

# Plot true vs. predicted values on the test set
plt.figure()
plt.scatter(np.arange(len(y_test)), y_test, c='b', s=80)
plt.plot(np.arange(len(y_test)), lasso_cv.best_estimator_.predict(X_test_scaled), c='r')
plt.title('Lasso Regression')
plt.show()

Random Forest
Random forest is an ensemble method: it builds many decision trees, each trained on a random bootstrap sample of the data with a random subset of features considered at each split, and averages their predictions. This randomization improves generalization and robustness. A hand-rolled bagging sketch appears below, followed by the full scikit-learn example with grid search.
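To see why averaging bootstrapped trees helps, here is a minimal hand-rolled bagging sketch; a real random forest additionally samples a random subset of features at each split, which this sketch omits.

import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.datasets import load_diabetes

X, y = load_diabetes(return_X_y=True)
rng = np.random.RandomState(0)

# Fit each tree on a bootstrap sample (rows drawn with replacement)
trees = []
for _ in range(10):
    idx = rng.randint(0, len(X), size=len(X))
    trees.append(DecisionTreeRegressor(max_depth=5).fit(X[idx], y[idx]))

# The ensemble prediction is the average of the individual tree predictions
pred = np.mean([t.predict(X) for t in trees], axis=0)
print("bagged trees R^2 on training data:",
      1 - np.sum((y - pred) ** 2) / np.sum((y - y.mean()) ** 2))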
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler

# Load the diabetes dataset
data = load_diabetes()
X = data['data']
y = data['target']

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Standardize the features (not required for tree models, kept for consistency)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Tune the number of trees and tree depth with grid search
forest = RandomForestRegressor(random_state=1, n_jobs=-1)
params = {'n_estimators': [100, 200, 500], 'max_depth': [3, 5, 10, 20]}
forest_cv = GridSearchCV(forest, params, cv=5)
forest_cv.fit(X_train_scaled, y_train)

# Evaluate the model
print("Best model:", forest_cv.best_estimator_)
print("Training score:", forest_cv.score(X_train_scaled, y_train))
print("Test score:", forest_cv.score(X_test_scaled, y_test))

# Plot true vs. predicted values on the test set
plt.figure()
plt.scatter(np.arange(len(y_test)), y_test, c='b', s=80)
plt.plot(np.arange(len(y_test)), forest_cv.best_estimator_.predict(X_test_scaled), c='r')
plt.title('Random Forest Regression')
plt.show()

SVR
Support Vector Regression (SVR) applies the support vector machine framework to regression and, with a suitable kernel, can model non-linear relationships. The following code compares kernels and fits an SVR model:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVR
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Load a housing dataset (the original load_boston was removed in
# scikit-learn 1.2; California housing is used here instead, and a
# subsample keeps SVR training fast)
housing = fetch_california_housing()
X, y = housing.data[:2000], housing.target[:2000]

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Standardize the features (SVR is sensitive to feature scale)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Compare a linear kernel against an RBF kernel
for kernel in ['linear', 'rbf']:
    svr = SVR(kernel=kernel)
    svr.fit(X_train_scaled, y_train)
    print(kernel, "training score:", svr.score(X_train_scaled, y_train))
    print(kernel, "testing score:", svr.score(X_test_scaled, y_test))

# Fit the RBF kernel with an explicit gamma and plot the predictions
svr = SVR(kernel='rbf', gamma=0.1)
svr.fit(X_train_scaled, y_train)
y_pred = svr.predict(X_test_scaled)

plt.figure()
plt.scatter(np.arange(len(y_test)), y_test, c='b', s=80)
plt.plot(np.arange(len(y_test)), y_pred, c='r')
plt.title('SVR Regression')
plt.show()

MLP
The multi-layer perceptron (MLP) is a basic feed-forward neural network that can be used for both classification and regression. The following code fits an MLP regressor to a noisy one-dimensional function:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPRegressor

# Generate a noisy one-dimensional target
rnd = np.random.RandomState(38)
X = rnd.uniform(-5, 5, size=50).reshape(-1, 1)
y = ((np.cos(6 * X) + X) / 2).ravel() + rnd.normal(size=len(X)) / 2

# Train the MLP model (max_iter raised so training converges)
model = MLPRegressor(max_iter=2000).fit(X, y)

# Plot the fitted curve against the data
line = np.linspace(-5, 5, 1000, endpoint=False).reshape(-1, 1)
plt.plot(line, model.predict(line), label='MLP', c='k')
plt.scatter(X, y)
plt.legend(loc='best')
plt.title('MLP Regression')
plt.show()

Data Preprocessing
Data preprocessing is an important step in machine learning; common operations include normalization, standardization, binning, and feature selection. A binning sketch appears below, followed by examples of standardization and min-max normalization:
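Binning (discretization) is mentioned above but not shown in the scaling example below; a minimal sketch using scikit-learn's KBinsDiscretizer:

import numpy as np
from sklearn.preprocessing import KBinsDiscretizer

X = np.random.RandomState(0).uniform(-3, 3, size=(6, 1))
# Cut the continuous feature into 4 equal-width bins
# and one-hot encode the bin index of each sample
binner = KBinsDiscretizer(n_bins=4, encode='onehot-dense', strategy='uniform')
X_binned = binner.fit_transform(X)
print(X_binned)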
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler, MinMaxScaler

# Generate sample data
X = np.random.uniform(-1, 1, size=(100, 2))

# Standardization: zero mean, unit variance
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c='b', edgecolors='k')
plt.title('Standardized Data')
plt.show()

# Min-max normalization: rescale each feature to [0, 1]
mmscaler = MinMaxScaler()
X_mmscaled = mmscaler.fit_transform(X)
plt.scatter(X_mmscaled[:, 0], X_mmscaled[:, 1], c='b', edgecolors='k')
plt.title('Min-Max Normalized Data')
plt.show()

# Compare the raw and transformed data side by side
plt.figure()
plt.subplot(2, 2, 1)
plt.imshow(X, cmap='hot')
plt.title('Raw Data')
plt.axis('off')
plt.subplot(2, 2, 2)
plt.imshow(X_scaled, cmap='cool')
plt.title('Standardized Data')
plt.axis('off')
plt.subplot(2, 2, 3)
plt.imshow(X_mmscaled, cmap='cool')
plt.title('Normalized Data')
plt.axis('off')
plt.show()

Dimensionality Reduction
Dimensionality reduction techniques such as PCA reduce the number of features while retaining most of the information in the data. The short sketch below shows how the explained variance ratio can guide the choice of n_components; the full example follows.
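A common way to choose n_components is to inspect the fraction of variance each component explains:

from sklearn.datasets import load_wine
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

X, y = load_wine(return_X_y=True)
# Fit PCA on all components to inspect how much variance each one explains
pca = PCA().fit(StandardScaler().fit_transform(X))
print(pca.explained_variance_ratio_[:5])
print("variance kept by first 2 components:", pca.explained_variance_ratio_[:2].sum())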
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_wine
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

# Load the wine dataset
wine = load_wine()
X, y = wine['data'], wine['target']

# Standardize the data (PCA is sensitive to feature scale)
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Project onto the first two principal components
pca = PCA(n_components=2)
pca.fit(X_scaled)
X_pca = pca.transform(X_scaled)

# Plot the projected classes
plt.figure()
plt.scatter(X_pca[y == 0, 0], X_pca[y == 0, 1], c='b', s=60, edgecolors='k')
plt.scatter(X_pca[y == 1, 0], X_pca[y == 1, 1], c='g', s=60, edgecolors='k')
plt.scatter(X_pca[y == 2, 0], X_pca[y == 2, 1], c='r', s=60, edgecolors='k')
plt.xlabel('Component 1')
plt.ylabel('Component 2')
plt.legend(wine['target_names'])
plt.title('PCA Result')
plt.show()

Feature Extraction
Feature extraction techniques such as PCA and non-negative matrix factorization (NMF) derive a small set of informative features from high-dimensional data. A brief sketch of the factorization and its reconstruction error appears below, followed by the full example.
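NMF factorizes a non-negative matrix A into A ≈ W @ H with W and H both non-negative; the reconstruction error indicates how much information the low-rank factors retain. A brief sketch:

import numpy as np
from sklearn.decomposition import NMF

rng = np.random.RandomState(42)
A = rng.rand(100, 100)  # NMF requires non-negative input

nmf = NMF(n_components=3, max_iter=500, random_state=42)
W = nmf.fit_transform(A)   # (100, 3) coefficients
H = nmf.components_        # (3, 100) basis components

# The factorization approximates A, so the residual norm measures the loss
print("reconstruction error:", np.linalg.norm(A - W @ H))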
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import NMF

# Generate a random non-negative matrix (NMF requires non-negative input)
np.random.seed(42)
A = np.random.rand(100, 100)

# Factorize into 3 components (max_iter raised so the solver converges)
nmf = NMF(n_components=3, max_iter=500)
nmf.fit(A)

# Plot the learned components
plt.figure()
plt.plot(nmf.components_.T)
plt.title('NMF Components')
plt.show()

Clustering
Clustering algorithms partition data into groups of similar points. A sketch of choosing the number of clusters via silhouette scores appears below, followed by a comparison of K-means and DBSCAN:
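Unlike DBSCAN, K-means needs the number of clusters up front; one common heuristic is to scan k and compare silhouette scores (higher means better-separated clusters). A minimal sketch:

from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import silhouette_score

X, _ = make_blobs(n_samples=300, centers=4, random_state=0)

# Scan candidate cluster counts and report the silhouette score of each
for k in range(2, 7):
    labels = KMeans(n_clusters=k, n_init=10, random_state=0).fit_predict(X)
    print(k, silhouette_score(X, labels))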
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans, DBSCAN

# A small dataset with repeated points
X = np.array([[1, 2], [1, 2], [5, 8], [5, 8], [5, 7], [5, 7],
              [0, 0], [0, 0], [0, 0], [0, 0]])

# Cluster with K-means
model = KMeans(n_clusters=4, init='k-means++', n_init=10)
y_pred = model.fit_predict(X)

# Cluster with DBSCAN (min_samples lowered to 2 so this tiny dataset
# actually forms clusters; with min_samples=10 every point would be noise)
model_1 = DBSCAN(eps=0.75, min_samples=2)
y_pred_1 = model_1.fit_predict(X)

# Plot the raw data and both clusterings
plt.figure()
plt.subplot(2, 2, 1)
plt.scatter(X[:, 0], X[:, 1], c='b', edgecolors='k', alpha=0.7)
plt.title('Raw Data')
plt.axis('off')
plt.subplot(2, 2, 2)
plt.scatter(X[:, 0], X[:, 1], c=y_pred, edgecolors='k', alpha=0.7)
plt.title('K-means Clustering')
plt.axis('off')
plt.subplot(2, 2, 3)
plt.scatter(X[:, 0], X[:, 1], c=y_pred_1, edgecolors='k', alpha=0.7)
plt.title('DBSCAN Clustering')
plt.axis('off')
plt.show()

Model Evaluation and Optimization
Cross-validation is an important way to estimate how well a model generalizes: the data are split into several folds, and each fold takes a turn as the validation set. The sketch below shows multi-metric evaluation with cross_validate; the basic cross_val_score example follows.
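Beyond a single score per fold, scikit-learn's cross_validate can return several metrics and the training scores in one call; a short sketch:

from sklearn.datasets import load_wine
from sklearn.model_selection import cross_validate
from sklearn.svm import SVC

X, y = load_wine(return_X_y=True)

# Evaluate accuracy and macro-F1 across 6 folds, keeping training scores too
res = cross_validate(SVC(kernel='linear'), X, y, cv=6,
                     scoring=['accuracy', 'f1_macro'], return_train_score=True)
print(res['test_accuracy'].mean(), res['test_f1_macro'].mean())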
import numpy as np
from sklearn.datasets import load_wine
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC

# Load the wine dataset
wine = load_wine()
X, y = wine['data'], wine['target']

# Define a linear SVM and run 6-fold cross-validation
svc = SVC(kernel='linear')
scores = cross_val_score(svc, X, y, cv=6)
print("Cross-validation scores:", scores)
print("Mean cross-validation score:", scores.mean())

Pipelines
A pipeline (Pipeline) chains several processing steps and a final estimator into a single object, which makes parameter tuning and model training more convenient. The following code combines scaling, model-based feature selection, and an MLP classifier, and tunes the whole pipeline with grid search:
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# Generate sample data
X, y = make_blobs(n_samples=200, centers=2, random_state=38)

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=38)

# Chain scaling, model-based feature selection, and an MLP classifier
pipe = Pipeline([
    ('scaler', StandardScaler()),
    ('select_model', SelectFromModel(RandomForestRegressor(random_state=38))),
    ('mlp', MLPClassifier(max_iter=2000, random_state=38))
])

# Grid-search over the MLP step's parameters (the step name prefixes each key)
params = {
    'mlp__hidden_layer_sizes': [(50,), (100,), (100, 100)],
    'mlp__alpha': [0.0001, 0.001, 0.01, 0.1]
}
grid = GridSearchCV(pipe, params, cv=3)
grid.fit(X_train, y_train)

# Report the results
print("Best cross-validation score:", grid.best_score_)
print("Best estimator:", grid.best_estimator_)
print("Test score:", grid.score(X_test, y_test))
print("Pipeline steps:", pipe.steps)

By wrapping every step in a pipeline, grid search can tune the whole workflow at once and automatically pick the best parameter combination, improving model performance.