
Predicting iris species with a linear classifier (Python)


This is code I ran while studying on my own, so I won't paste the output here; run it yourself if you are interested. It is for reference only. Feel free to contact me privately if anything is unclear or if you spot a problem, though I can only offer limited advice, since I am a beginner too.

The first script

# -*- coding: utf-8 -*-
# the encoding declaration above lets the file contain Chinese comments
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
import matplotlib.pyplot as plt
import sklearn as sk
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDClassifier
# the import path given in the book could not be found, so SGDClassifier is imported from here instead
from sklearn import metrics

iris = datasets.load_iris()
X_iris, y_iris = iris.data, iris.target
#print X_iris.shape, y_iris.shape
#print X_iris[0], y_iris[0]

# keep only the first two features (sepal length and width)
X, y = X_iris[:, :2], y_iris
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=33)
#print X_train.shape, y_train.shape

# standardize features using statistics from the training set only
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)

# scatter plot of the training set, one color per class
colors = ['red', 'greenyellow', 'blue']
for i in xrange(len(colors)):
    xs = X_train[:, 0][y_train == i]
    ys = X_train[:, 1][y_train == i]
    plt.scatter(xs, ys, c=colors[i])
plt.legend(iris.target_names)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
#plt.show()

# train a linear classifier with stochastic gradient descent
clf = SGDClassifier()
clf.fit(X_train, y_train)
#print clf.coef_
#print clf.intercept_

# plot the three one-versus-rest decision boundaries
x_min, x_max = X_train[:, 0].min() - .5, X_train[:, 0].max() + .5
y_min, y_max = X_train[:, 1].min() - .5, X_train[:, 1].max() + .5
xs = np.arange(x_min, x_max, 0.5)
fig, axes = plt.subplots(1, 3)
fig.set_size_inches(10, 6)
for i in [0, 1, 2]:
    axes[i].set_aspect('equal')
    axes[i].set_title('Class ' + str(i) + ' versus the rest')
    axes[i].set_xlabel('Sepal length')
    axes[i].set_ylabel('Sepal width')
    axes[i].set_xlim(x_min, x_max)
    axes[i].set_ylim(y_min, y_max)
    plt.sca(axes[i])  # sca belongs to matplotlib.pyplot, so it is called through plt
    plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=plt.prism())  # plt.cm from the book was not found, so plt.prism() is used instead
    ys = (-clf.intercept_[i] - xs * clf.coef_[i, 0]) / clf.coef_[i, 1]  # the book uses Xs, which is undefined here, so xs is used
    plt.plot(xs, ys, hold=True)
plt.show()

# predict a single (scaled) sample and inspect its decision scores
#print clf.predict(scaler.transform([[4.7,3.1]]))
#print clf.decision_function(scaler.transform([[4.7,3.1]]))

# evaluate on the training and test sets
y_train_pred = clf.predict(X_train)
#print metrics.accuracy_score(y_train, y_train_pred)
y_pred = clf.predict(X_test)
#print metrics.accuracy_score(y_test, y_pred)
#print metrics.classification_report(y_test, y_pred, target_names=iris.target_names)
print metrics.confusion_matrix(y_test, y_pred)
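
The script above follows the book and therefore uses an older scikit-learn and Python 2. As a minimal sketch only, assuming a recent scikit-learn (0.18 or later, where train_test_split moved to sklearn.model_selection and SGDClassifier is imported directly from sklearn.linear_model) and Python 3, the same train/scale/fit/evaluate flow might look like this:

# Python 3 sketch of the same workflow with current scikit-learn import paths
# (assumption: scikit-learn >= 0.18)
from sklearn import datasets, preprocessing, metrics
from sklearn.model_selection import train_test_split  # replaces sklearn.cross_validation
from sklearn.linear_model import SGDClassifier        # replaces sklearn.linear_model.stochastic_gradient

iris = datasets.load_iris()
X, y = iris.data[:, :2], iris.target                  # sepal length and width only

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=33)

scaler = preprocessing.StandardScaler().fit(X_train)
X_train, X_test = scaler.transform(X_train), scaler.transform(X_test)

clf = SGDClassifier().fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(metrics.accuracy_score(y_test, y_pred))
print(metrics.classification_report(y_test, y_pred, target_names=iris.target_names))
print(metrics.confusion_matrix(y_test, y_pred))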

The second script can be run separately, though it may reuse pieces of the first one. This one uses cross-validation.

from sklearn.cross_validation import cross_val_score, KFold
from sklearn.pipeline import Pipeline
from sklearn import preprocessing
from sklearn import datasets
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from scipy.stats import sem
import numpy as np

iris = datasets.load_iris()
X_iris, y_iris = iris.data, iris.target
X, y = X_iris[:, :2], y_iris

# chain standardization and the SGD classifier so each fold is scaled on its own training part
clf = Pipeline([('scaler', preprocessing.StandardScaler()), ('linear_model', SGDClassifier())])

# 5-fold cross-validation over all samples
cv = KFold(X.shape[0], 5, shuffle=True, random_state=33)
scores = cross_val_score(clf, X, y, cv=cv)
#print scores

def mean_score(scores):
    # report the mean accuracy and its standard error
    return ("Mean score: {0:.3f} (+/-{1:.3f})").format(np.mean(scores), sem(scores))

print mean_score(scores)
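
Again only as a sketch, under the assumption of a recent scikit-learn: KFold is now parameterized by n_splits rather than the sample count, and cross_val_score lives in sklearn.model_selection. The same pipeline-based 5-fold evaluation could then be written as:

# Sketch of the same 5-fold cross-validation with current scikit-learn
# (assumption: scikit-learn >= 0.18)
import numpy as np
from scipy.stats import sem
from sklearn import datasets, preprocessing
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import cross_val_score, KFold

iris = datasets.load_iris()
X, y = iris.data[:, :2], iris.target

clf = Pipeline([('scaler', preprocessing.StandardScaler()),
                ('linear_model', SGDClassifier())])
cv = KFold(n_splits=5, shuffle=True, random_state=33)
scores = cross_val_score(clf, X, y, cv=cv)
print("Mean score: {0:.3f} (+/-{1:.3f})".format(np.mean(scores), sem(scores)))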
