Decision Tree Algorithm
from math import log
import operator
def createDataSet():
    dataSet = [[1, 0, 'yes'],
               [1, 0, 'yes'],
               [0, 1, 'no'],
               [0, 1, 'no'],
               [1, 0, 'no']]
    labels = ['no surfacing', 'flippers']  # feature labels for columns 0 and 1
    return dataSet, labels
def calcShannonEnt(dataSet):  # compute the Shannon entropy of the data set
    numEntries = len(dataSet)
    labelCounts = {}
    for featVec in dataSet:
        currentLabel = featVec[-1]  # the class label is the last column
        if currentLabel not in labelCounts:
            labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    shannonEnt = 0.0
    for key in labelCounts:
        prob = float(labelCounts[key]) / numEntries
        shannonEnt -= prob * log(prob, 2)
    return shannonEnt
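
A quick sanity check (a hypothetical session, not from the original text): the sample set has two 'yes' and three 'no' labels, so the entropy should be -(2/5)log2(2/5) - (3/5)log2(3/5) ≈ 0.971.

    # Hypothetical usage sketch: entropy of the sample data.
    myDat, labels = createDataSet()
    print(calcShannonEnt(myDat))  # ~0.971 for a 2/5 vs 3/5 class split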
def splitDataSet(dataSet, axis, value):
    # Partition the data set: axis is the column index of the chosen
    # best feature, value is one value that feature can take.
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            reducedFeatVec = featVec[:axis]
            # Column axis is effectively deleted here, matching the
            # del on labels inside createTree below.
            reducedFeatVec.extend(featVec[axis+1:])
            # Note the difference between extend and append on lists.
            retDataSet.append(reducedFeatVec)
    return retDataSet
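
For example, splitting the sample data on feature 0 with value 1 keeps the three rows whose first column is 1 and drops that column (a hypothetical session):

    # Hypothetical usage sketch: split on feature 0, value 1.
    myDat, labels = createDataSet()
    print(splitDataSet(myDat, 0, 1))  # [[0, 'yes'], [0, 'yes'], [0, 'no']]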
def chooseBestFeatureToSplit(dataSet):
    # Pick the feature whose split maximizes the information gain,
    # i.e. gives the largest drop in Shannon entropy (least disorder).
    numFeatures = len(dataSet[0]) - 1
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):
        featList = [example[i] for example in dataSet]
        uniqueVals = set(featList)
        newEntropy = 0.0
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)
        infoGain = baseEntropy - newEntropy
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature
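
On the sample data both features happen to split off the same two pure 'no' rows and therefore yield the same gain, so the strict > comparison keeps the first one (a hypothetical session):

    # Hypothetical usage sketch.
    myDat, labels = createDataSet()
    print(chooseBestFeatureToSplit(myDat))  # 0, i.e. 'no surfacing'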
def majorityCnt(classList):
    # If the features are exhausted but the class labels still disagree,
    # fall back to the most common class as the answer.
    classCount = {}
    for vote in classList:
        if vote not in classCount:
            classCount[vote] = 0
        classCount[vote] += 1
    sortedClassCount = sorted(classCount.items(),  # iteritems() is Python 2 only
                              key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
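
A minimal check of the majority vote (hypothetical):

    print(majorityCnt(['yes', 'no', 'no']))  # 'no'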
def createTree(dataSet, labels):
    classList = [example[-1] for example in dataSet]
    # If every example in this partition has the same class, stop.
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # If only the class column is left, take a majority vote.
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}
    del(labels[bestFeat])  # mutates the caller's labels list
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        subLabels = labels[:]  # copy so recursive calls don't share one list
        myTree[bestFeatLabel][value] = createTree(
            splitDataSet(dataSet, bestFeat, value), subLabels)
    return myTree
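
Because createTree deletes entries from labels in place, it is safest to pass in a copy and keep the original list around for classify later (a usage sketch, not from the original text):

    # Hypothetical usage sketch: build the tree from a copy of labels.
    myDat, labels = createDataSet()
    myTree = createTree(myDat, labels[:])  # labels itself stays intact
    print(myTree)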
def classify(inputTree, featLabels, testVec):
    # The order in which features were split is stored in the tree, not
    # known in advance, so the classifier walks the tree and looks up
    # each feature's column index via featLabels.
    firstStr = list(inputTree.keys())[0]
    secondDict = inputTree[firstStr]
    featIndex = featLabels.index(firstStr)
    classLabel = None  # returned if no branch matches the test vector
    for key in secondDict.keys():
        if testVec[featIndex] == key:
            if isinstance(secondDict[key], dict):
                classLabel = classify(secondDict[key], featLabels, testVec)
            else:
                classLabel = secondDict[key]
    return classLabel
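
With a hand-written tree equivalent to retrieveTree(0) below, classification tests 'no surfacing' first, then 'flippers' (a hypothetical session):

    # Hypothetical usage sketch.
    tree = {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
    labels = ['no surfacing', 'flippers']
    print(classify(tree, labels, [1, 0]))  # 'no'
    print(classify(tree, labels, [1, 1]))  # 'yes'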
# Serialization helpers; note the binary modes 'wb' and 'rb'.
def storeTree(inputTree, filename):
    import pickle
    with open(filename, 'wb') as fw:
        pickle.dump(inputTree, fw)

def grabTree(filename):
    import pickle
    with open(filename, 'rb') as fr:
        return pickle.load(fr)
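
Round-tripping a tree through disk (hypothetical filename):

    # Hypothetical usage sketch.
    tree = {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
    storeTree(tree, 'classifierStorage.pkl')
    print(grabTree('classifierStorage.pkl'))  # the same dict back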
Plotting the Decision Tree
import matplotlib.pyplot as plt

# boxstyle sets the node's shape, fc its grey level.
decisionNode = dict(boxstyle="sawtooth", fc="0.8")
leafNode = dict(boxstyle="round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")

def plotNode(nodeTxt, centerPt, parentPt, nodeType):
    # nodeTxt is the annotation text, centerPt the current node's
    # coordinates, parentPt the parent's coordinates, and nodeType
    # the node style (decisionNode or leafNode).
    createPlot.ax1.annotate(nodeTxt, xy=parentPt,
                            xycoords='axes fraction',
                            xytext=centerPt, textcoords='axes fraction',
                            va='center', ha='center', bbox=nodeType,
                            arrowprops=arrow_args)
def getNumLeafs(myTree):
    # Count the leaf nodes; this drives the plot's width.
    numLeafs = 0
    firstStr = list(myTree.keys())[0]
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if isinstance(secondDict[key], dict):
            numLeafs += getNumLeafs(secondDict[key])
        else:
            numLeafs += 1
    return numLeafs
def getTreeDepth(myTree):
    # Compute the tree's depth; this drives the plot's height.
    maxDepth = 0
    firstStr = list(myTree.keys())[0]
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if isinstance(secondDict[key], dict):
            thisDepth = 1 + getTreeDepth(secondDict[key])
        else:
            thisDepth = 1
        if thisDepth > maxDepth:
            maxDepth = thisDepth
    return maxDepth
def retrieveTree(i):  # canned trees for testing
    listOfTree = [{'no surfacing': {0: 'no', 1: {'flippers':
                      {0: 'no', 1: 'yes'}}}},
                  {'no surfacing': {0: 'no', 1: {'flippers':
                      {0: {'head': {0: 'no', 1: 'yes'}}, 1: 'no'}}}}]
    return listOfTree[i]
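
The canned trees make it easy to check the layout helpers (a hypothetical session):

    # Hypothetical usage sketch.
    myTree = retrieveTree(0)
    print(getNumLeafs(myTree))   # 3
    print(getTreeDepth(myTree))  # 2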
def plotMidText(cntrPt, parentPt, txtString):
    # Label the midpoint of the edge between two nodes with the
    # feature value that leads down that branch.
    xMid = (parentPt[0] - cntrPt[0]) / 2.0 + cntrPt[0]
    yMid = (parentPt[1] - cntrPt[1]) / 2.0 + cntrPt[1]
    createPlot.ax1.text(xMid, yMid, txtString)
def plotTree(myTree, parentPt, nodeTxt):
    numLeafs = getNumLeafs(myTree)  # width of this subtree in leaves
    firstStr = list(myTree.keys())[0]
    # Center the decision node above the leaves of its subtree.
    cntrPt = (plotTree.xOff + (1.0 + float(numLeafs)) / 2.0 /
              plotTree.totalW, plotTree.yOff)
    plotMidText(cntrPt, parentPt, nodeTxt)
    plotNode(firstStr, cntrPt, parentPt, decisionNode)
    secondDict = myTree[firstStr]
    plotTree.yOff = plotTree.yOff - 1.0 / plotTree.totalD  # descend one level
    for key in secondDict.keys():
        if isinstance(secondDict[key], dict):
            plotTree(secondDict[key], cntrPt, str(key))  # recurse into subtree
        else:
            plotTree.xOff = plotTree.xOff + 1.0 / plotTree.totalW
            plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff),
                     cntrPt, leafNode)
            plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
    plotTree.yOff = plotTree.yOff + 1.0 / plotTree.totalD  # back up one level
def createPlot(inTree):
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    axprops = dict(xticks=[], yticks=[])
    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)
    # Plot-wide constants, hung off the function as attributes:
    plotTree.totalW = float(getNumLeafs(inTree))  # tree width
    plotTree.totalD = float(getTreeDepth(inTree))  # tree height
    # Initial xOff/yOff: coordinates of the most recently drawn node.
    plotTree.xOff = -0.5 / plotTree.totalW
    plotTree.yOff = 1.0
    plotTree(inTree, (0.5, 1.0), ' ')
    plt.show()
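
Drawing one of the canned trees ties everything together (a hypothetical session):

    # Hypothetical usage sketch: a window with the 'no surfacing' root,
    # a 'flippers' decision node, and three leaves.
    createPlot(retrieveTree(0))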