k-Nearest Neighbors (kNN) Algorithm: Code and Summary
阿新 • Published: 2019-01-01
Below is Python 3.4 code that I have modified, adapted from the algorithm in Chapter 2 of Machine Learning in Action.
from numpy import *
import operator
from os import listdir

def file2matrix(filename):
    fr = open(filename)
    numberOfLines = len(fr.readlines())    # get the number of lines in the file
    returnMat = zeros((numberOfLines, 3))  # prepare matrix to return
    classLabelVector = []                  # prepare labels to return
    fr = open(filename)                    # reopen to read from the start again
    index = 0
    for line in fr.readlines():
        line = line.strip()
        listFromLine = line.split('\t')
        returnMat[index, :] = listFromLine[0:3]
        classLabelVector.append(str(listFromLine[3]))
        index += 1
    return returnMat, classLabelVector

# test case
def classifyPerson():
    resultList = ['not at all', 'in small doses', 'in large doses']  # maps the integer labels 1-3 of datingTestSet2.txt
    percentTats = float(input("percentage of time spent playing video games?"))
    ffMiles = float(input("frequent flier miles earned per year?"))
    iceCream = float(input("liters of ice cream consumed per year?"))
    datingDataMat, datingLabels = file2matrix(r'D:\machinelearninginaction\Ch02\datingTestSet.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
    inArr = array([ffMiles, percentTats, iceCream])
    classifierResult = classify0((inArr - minVals) / ranges, normMat, datingLabels, 3)
    # datingTestSet.txt carries string labels, so print the predicted label itself
    print("You will probably like this person:", classifierResult)

# simple kNN algorithm
def classify0(inX, dataSet, labels, k):
    dataSetSize = dataSet.shape[0]
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet   # difference to every training example
    sqDiffMat = diffMat ** 2
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances ** 0.5                    # Euclidean distances
    sortedDistIndicies = distances.argsort()
    classCount = {}
    for i in range(k):                                # majority vote among the k nearest
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]

def createDataSet():
    group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
    labels = ['A', 'A', 'B', 'B']
    return group, labels

# normalization
def autoNorm(dataSet):
    minVals = dataSet.min(0)
    maxVals = dataSet.max(0)
    ranges = maxVals - minVals
    m = dataSet.shape[0]
    normDataSet = dataSet - tile(minVals, (m, 1))
    normDataSet = normDataSet / tile(ranges, (m, 1))  # element-wise divide
    return normDataSet, ranges, minVals

# test case
def datingClassTest():
    hoRatio = 0.50  # hold out 50% of the data for testing
    datingDataMat, datingLabels = file2matrix(r'D:\machinelearninginaction\Ch02\datingTestSet2.txt')  # load data set from file
    normMat, ranges, minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]
    numTestVecs = int(m * hoRatio)
    errorCount = 0.0
    for i in range(numTestVecs):
        classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :], datingLabels[numTestVecs:m], 3)
        # file2matrix stores the labels as strings, so format both with %s
        print("the classifier came back with: %s, the real answer is: %s" % (classifierResult, datingLabels[i]))
        if classifierResult != datingLabels[i]:
            errorCount += 1.0
    print("the total error rate is: %f" % (errorCount / float(numTestVecs)))
    print(errorCount)

def img2vector(filename):
    returnVect = zeros((1, 1024))
    fr = open(filename)
    for i in range(32):  # flatten a 32x32 text image into a 1x1024 vector
        lineStr = fr.readline()
        for j in range(32):
            returnVect[0, 32 * i + j] = int(lineStr[j])
    return returnVect

# test case
def handwritingClassTest():
    hwLabels = []
    trainingFileList = listdir(r'D:\machinelearninginaction\Ch02\trainingDigits')  # load the training set
    m = len(trainingFileList)
    trainingMat = zeros((m, 1024))
    for i in range(m):
        fileNameStr = trainingFileList[i]
        fileStr = fileNameStr.split('.')[0]        # take off .txt
        classNumStr = int(fileStr.split('_')[0])   # the digit is encoded in the file name
        hwLabels.append(classNumStr)
        trainingMat[i, :] = img2vector(r'D:\machinelearninginaction\Ch02\trainingDigits' + '\\' + fileNameStr)
    testFileList = listdir(r'D:\machinelearninginaction\Ch02\testDigits')  # iterate through the test set
    errorCount = 0.0
    mTest = len(testFileList)
    for i in range(mTest):
        fileNameStr = testFileList[i]
        fileStr = fileNameStr.split('.')[0]        # take off .txt
        classNumStr = int(fileStr.split('_')[0])
        vectorUnderTest = img2vector(r'D:\machinelearninginaction\Ch02\testDigits' + '\\' + fileNameStr)
        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)
        print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, classNumStr))
        if classifierResult != classNumStr:
            errorCount += 1.0
    print("\nthe total number of errors is: %d" % errorCount)
    print("\nthe total error rate is: %f" % (errorCount / float(mTest)))
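For context, file2matrix expects each line of the dating data file to hold three tab-separated numeric features followed by a label. Here is a minimal sketch that writes a tiny file in that format and parses it back; the file name tinyDatingSet.txt and the sample values are my own, invented for illustration, and it assumes the code above is saved as kNN.py:

# Minimal sketch: write three lines in the tab-separated format that
# file2matrix expects, then parse them back. Values are illustrative only.
import kNN  # assumes the code above was saved as kNN.py

sample = ("40920\t8.326976\t0.953952\tlargeDoses\n"
          "14488\t7.153469\t1.673904\tsmallDoses\n"
          "26052\t1.441871\t0.805124\tdidntLike\n")
with open('tinyDatingSet.txt', 'w') as f:
    f.write(sample)

mat, labels = kNN.file2matrix('tinyDatingSet.txt')
print(mat.shape)   # (3, 3): three rows, three numeric features each
print(labels)      # ['largeDoses', 'smallDoses', 'didntLike']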
Enter the following commands at the Python 3.4 console to test:
>>> import kNN
>>> group, labels = kNN.createDataSet()
>>> kNN.classify0([0,0], group, labels, 3)
>>> from importlib import reload   # reload is no longer a builtin in Python 3
>>> reload(kNN)
>>> datingDataMat,datingLabels = kNN.file2matrix('datingTestSet.txt')
>>> kNN.datingClassTest()
>>> kNN.classifyPerson()
>>> testVector = kNN.img2vector('testDigits/0_13.txt')
>>> kNN.handwritingClassTest()
Below are some screenshots from my runs of the examples above:
Summary: kNN is a simple and effective algorithm for classifying data. It is an instance-based machine learning method: all it needs is labeled data on hand. Its drawback is that it must traverse the entire dataset, computing the distance between the query point and every single training example, which becomes slow for large datasets and also consumes a lot of storage.
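To make that cost concrete: classify0 above copies the query across a dataSetSize-row array with tile before subtracting, so every query touches all n training rows. The sketch below is my own broadcasting-based variant (the name knn_predict is not from the book); it computes the same Euclidean distances without the explicit tile copy, but the linear scan over the whole dataset, which is exactly the bottleneck described above, remains:

import numpy as np

def knn_predict(query, data, labels, k=3):
    # Broadcasting subtracts the query from every row without tile's explicit copy.
    dists = np.sqrt(((data - query) ** 2).sum(axis=1))  # distance to all n examples
    nearest = np.argsort(dists)[:k]                     # indices of the k closest rows
    votes = [labels[i] for i in nearest]
    return max(set(votes), key=votes.count)             # majority vote

# Usage with the toy data from createDataSet():
data = np.array([[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]])
labels = ['A', 'A', 'B', 'B']
print(knn_predict(np.array([0.0, 0.0]), data, labels, k=3))  # prints 'B'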
Another drawback of kNN is that it gives us no insight into the underlying structure of the data (for example, whether it follows a normal or a Bernoulli distribution); nor does it tell us the class means, or what a typical example of a given class should look like.