Machine Learning分類法有兩篇寫的淺顯易懂,就不重複造輪子了,直接看
1、Decision Tree
# Decision-tree classification demo on the iris dataset:
# load data, split train/test, fit, predict, and report accuracy.
from sklearn.datasets import load_iris
from sklearn import tree
# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn import metrics

# Load the iris dataset (150 samples, 4 features, 3 classes)
iris = load_iris()
iris_X = iris.data
iris_y = iris.target

# Split into training and test sets (30% held out for testing;
# split is random — pass random_state=... for reproducibility)
train_X, test_X, train_y, test_y = train_test_split(iris_X, iris_y, test_size=0.3)

# Build and fit the classifier
clf = tree.DecisionTreeClassifier()
iris_clf = clf.fit(train_X, train_y)

# Predict on the held-out test set
test_y_predicted = iris_clf.predict(test_X)
print(test_y_predicted)

# Ground-truth labels
print(test_y)

# Performance: fraction of correctly classified test samples
accuracy = metrics.accuracy_score(test_y, test_y_predicted)
print(accuracy)
2、knn
# k-nearest-neighbors classification demo on the iris dataset,
# mirroring the decision-tree example above.
from sklearn.datasets import load_iris
from sklearn import neighbors
# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn import metrics

# Load the iris dataset (150 samples, 4 features, 3 classes)
iris = load_iris()
iris_X = iris.data
iris_y = iris.target

# Split into training and test sets (30% held out for testing;
# split is random — pass random_state=... for reproducibility)
train_X, test_X, train_y, test_y = train_test_split(iris_X, iris_y, test_size=0.3)

# Build and fit the classifier (default k=5 neighbors)
clf = neighbors.KNeighborsClassifier()
iris_clf = clf.fit(train_X, train_y)

# Predict on the held-out test set
test_y_predicted = iris_clf.predict(test_X)
print(test_y_predicted)

# Ground-truth labels
print(test_y)

# Performance: fraction of correctly classified test samples
# (metrics was imported but unused in the original — completes the example)
accuracy = metrics.accuracy_score(test_y, test_y_predicted)
print(accuracy)
3、knn與svm的比較
這樣就很清楚分類的用法啦!
沒有留言:
張貼留言