几种sklearn库直接实现分类算法

mac2022-06-30  25

机器学习入门——直接调用sklearn实现几种简单算法 刚学习机器学习,希望大佬们勿喷,望指点 几种分类算法针对鸢尾花数据的分析 1. LR线性回归分类算法

# Linear-regression "classifier" on the iris data set.
# NOTE(review): LinearRegression is a regressor; its continuous predictions
# are only loosely comparable with the integer class labels printed below.
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression

# Load the bundled iris data: 150 samples with 4 numeric features each.
iris = datasets.load_iris()
iris_X = iris.data
print('特征变量的长度', len(iris_X))  # example output: 150

# Integer class labels (0, 1, 2), 50 of each.
iris_y = iris.target
print('鸢尾花的目标值', iris_y)

# Hold out 30% of the rows as the test split.
X_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=0.3)

# Fit an ordinary least-squares model on the training split.
model = LinearRegression()
model.fit(X_train, y_train)
model.get_params()

# R^2 score on the held-out split (example run: ~0.91).
score = model.score(X_test, y_test)
print("此模型得分为%s" % score)

# Continuous predictions, followed by the true integer labels.
print(model.predict(X_test))
print(y_test)

``

2. LR逻辑回归分类

# Logistic-regression classifier on the iris data set.
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

iris = datasets.load_iris()
iris_X = iris.data          # feature matrix, 150 rows x 4 columns
print('特征变量的长度', len(iris_X))

iris_y = iris.target        # class labels 0/1/2
print('鸢尾花的目标值', iris_y)

# 30% of the samples become the held-out test split.
X_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=0.3)

# liblinear solver on this small data set; multi_class left as 'auto'.
clf = LogisticRegression(solver='liblinear', multi_class='auto')
clf.fit(X_train, y_train)
clf.get_params()

# Mean accuracy on the test split (example run: ~0.91).
print(clf.score(X_test, y_test))

# Hard class predictions, then per-class probabilities (one row per sample).
print(clf.predict(X_test))
print(clf.predict_proba(X_test))

# Ground-truth labels for comparison.
print(y_test)

3. knn算法

# k-nearest-neighbours classifier on the iris data set.
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

# iris: 150 samples, 4 numeric features, 3 classes.
iris = datasets.load_iris()
iris_X = iris.data
print('特征变量的长度', len(iris_X))

iris_y = iris.target
print('鸢尾花的目标值', iris_y)

# 70/30 train/test split.
X_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=0.3)

# KNeighborsClassifier with its default settings.
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)

# Show the hyper-parameters actually in effect.
params = knn.get_params()
print(params)

# Mean accuracy on the test split (example run: ~0.96).
score = knn.score(X_test, y_test)
print("预测得分为:%s" % score)

# Predicted labels, then the true labels for comparison.
print(knn.predict(X_test))
print(y_test)

4. 朴素贝叶斯算法(nb)

# Gaussian naive-Bayes classifier on the iris data set.
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn import naive_bayes

iris = datasets.load_iris()
iris_X = iris.data              # 150 x 4 feature matrix
print('特征变量的长度', len(iris_X))

iris_y = iris.target            # class labels 0/1/2
print('鸢尾花的目标值', iris_y)

# Reserve 30% of the rows for testing.
X_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=0.3)

# GaussianNB: naive Bayes with a Gaussian likelihood per feature.
model = naive_bayes.GaussianNB()
model.fit(X_train, y_train)
model.get_params()

# Accuracy on the held-out split (example run: ~0.96).
score = model.score(X_test, y_test)
print("此模型得分为%s" % score)

print(model.predict(X_test))    # predicted labels
print(y_test)                   # true labels

5. 神经网络算法(bp)

# Multi-layer-perceptron (BP neural network) classifier on the iris data set.
# BUG FIX: the original began "rom sklearn import datasets" — the 'f' of
# "from" was missing, which is a SyntaxError; corrected below.
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier

iris = datasets.load_iris()
iris_X = iris.data              # 150 x 4 feature matrix
print('特征变量的长度', len(iris_X))

iris_y = iris.target            # class labels 0/1/2
print('鸢尾花的目标值', iris_y)

# Hold out 30% of the samples for testing.
X_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=0.3)

# Large max_iter so the solver has room to converge on this tiny data set.
bp = MLPClassifier(max_iter=10000)
bp.fit(X_train, y_train)
bp.get_params()

# Mean accuracy on the test split (example run: ~0.96).
score = bp.score(X_test, y_test)
print("此模型得分为%s" % score)

print(bp.predict(X_test))       # predicted labels
print(y_test)                   # true labels

6. 决策树算法

# Decision-tree classifier on the iris data set.
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn import tree

iris = datasets.load_iris()
iris_X = iris.data
print('特征变量的长度', len(iris_X))

iris_y = iris.target
print('鸢尾花的目标值', iris_y)

# 70/30 split between training and test rows.
X_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=0.3)

# DecisionTreeClassifier with its default settings.
clf = tree.DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf.get_params()

# Accuracy on the held-out rows (example run: ~0.91).
score = clf.score(X_test, y_test)
print("此模型得分为%s" % score)

print(clf.predict(X_test))      # predicted labels
print(y_test)                   # true labels

7. Svm算法实现

# Support-vector-machine classifier on the iris data set.
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

iris = datasets.load_iris()
iris_X = iris.data
print('特征变量的长度', len(iris_X))

iris_y = iris.target
print('鸢尾花的目标值', iris_y)

# Hold out 30% of the samples as the test split.
X_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=0.3)

# SVC with gamma='scale' (gamma derived from the training data spread).
svm = SVC(gamma='scale')
svm.fit(X_train, y_train)
svm.get_params()

# Accuracy on the test split (example run: ~0.98).
score = svm.score(X_test, y_test)
print("此模型得分为%s" % score)

print(svm.predict(X_test))      # predicted labels
print(y_test)                   # true labels
最新回复(0)