def print_decision_tree(tree, feature_names=None, offset_unit='    '):
    '''Prints a textual representation of the rules of a decision tree.

    tree: a fitted scikit-learn decision tree
    feature_names: list of feature names; defaults to f0, f1, f2, ... if not given
    offset_unit: string used to indent each nested conditional block
    '''
    left = tree.tree_.children_left
    right = tree.tree_.children_right
    threshold = tree.tree_.threshold
    value = tree.tree_.value

    if feature_names is None:
        features = ['f%d' % i for i in tree.tree_.feature]
    else:
        features = [feature_names[i] for i in tree.tree_.feature]

    def recurse(left, right, threshold, features, node, depth=0):
        offset = offset_unit * depth
        if threshold[node] != -2:  # -2 marks an undefined threshold, i.e. a leaf
            print(offset + "if ( " + features[node] + " <= " + str(threshold[node]) + " ) {")
            if left[node] != -1:
                recurse(left, right, threshold, features, left[node], depth + 1)
            print(offset + "} else {")
            if right[node] != -1:
                recurse(left, right, threshold, features, right[node], depth + 1)
            print(offset + "}")
        else:
            print(offset + "return " + str(value[node]))

    recurse(left, right, threshold, features, 0, 0)
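For reference, a minimal sketch of how this printer might be called on a freshly fitted classifier; the iris data and parameter choices here are illustrative assumptions, not part of the original answer:

# Hypothetical usage sketch: fit a small tree on iris and print its rules.
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

iris = load_iris()
clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(iris.data, iris.target)
print_decision_tree(clf, feature_names=list(iris.feature_names))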
# SQL from decision tree
import numpy as np

def get_lineage(tree, feature_names):
    left = tree.tree_.children_left
    right = tree.tree_.children_right
    threshold = tree.tree_.threshold
    features = [feature_names[i] for i in tree.tree_.feature]
    le = '<='
    g = '>'

    # get ids of leaf nodes (children_left == -1 marks a leaf)
    idx = np.argwhere(left == -1)[:, 0]

    def recurse(left, right, child, lineage=None):
        if lineage is None:
            lineage = [child]
        if child in left:
            parent = np.where(left == child)[0].item()
            split = 'l'
        else:
            parent = np.where(right == child)[0].item()
            split = 'r'
        lineage.append((parent, split, threshold[parent], features[parent]))
        if parent == 0:
            lineage.reverse()
            return lineage
        else:
            return recurse(left, right, parent, lineage)

    print('case ')
    for j, child in enumerate(idx):
        clause = ' when '
        for node in recurse(left, right, child):
            # the lineage also contains the bare leaf id (an int), not a
            # (parent, split, threshold, feature) tuple; skip it
            if not isinstance(node, tuple):
                continue
            sign = le if node[1] == 'l' else g
            clause = clause + node[3] + sign + str(node[2]) + ' and '
        clause = clause[:-4] + ' then ' + str(j)
        print(clause)
    print('else 99 end as clusters')
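A quick sketch of how get_lineage might be driven end to end; the toy data and the column-name sanitizing step are assumptions for illustration, since the generated CASE expression needs valid SQL identifiers:

# Hypothetical usage sketch: emit a SQL CASE expression for a small tree.
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

iris = load_iris()
clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(iris.data, iris.target)
# Make the column names SQL-friendly before generating the CASE clauses.
cols = [n.replace(' ', '_').replace('(', '').replace(')', '') for n in iris.feature_names]
get_lineage(clf, cols)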
A variant of the same printer that, instead of dumping the raw value array at each leaf, prints a bold YES/NO/Tie label based on the majority class (it assumes a binary classifier):

def print_decision_tree(tree, feature_names, offset_unit='    '):
    left = tree.tree_.children_left
    right = tree.tree_.children_right
    threshold = tree.tree_.threshold
    value = tree.tree_.value

    if feature_names is None:
        features = ['f%d' % i for i in tree.tree_.feature]
    else:
        features = [feature_names[i] for i in tree.tree_.feature]

    def recurse(left, right, threshold, features, node, depth=0):
        offset = offset_unit * depth
        if threshold[node] != -2:
            print(offset + "if ( " + features[node] + " <= " + str(threshold[node]) + " ) {")
            if left[node] != -1:
                recurse(left, right, threshold, features, left[node], depth + 1)
            print(offset + "} else {")
            if right[node] != -1:
                recurse(left, right, threshold, features, right[node], depth + 1)
            print(offset + "}")
        else:
            # Leaf: read the two class counts directly instead of parsing the
            # printed representation of value[node] character by character.
            # Class 0 is treated as "no" and class 1 as "yes".
            val_no, val_yes = value[node][0]
            if val_yes > val_no:
                print(offset + '\033[1m' + "YES" + '\033[0m')
            elif val_no > val_yes:
                print(offset + '\033[1m' + "NO" + '\033[0m')
            else:
                print(offset + '\033[1m' + "Tie" + '\033[0m')

    recurse(left, right, threshold, features, 0, 0)
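A minimal sketch of driving this variant on a binary problem; the breast-cancer dataset and depth are assumed for illustration:

# Hypothetical usage sketch on a two-class dataset.
from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier

data = load_breast_cancer()
clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(data.data, data.target)
print_decision_tree(clf, list(data.feature_names))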
A third variant builds one SQL CASE fragment per leaf, tracking each node's path in module-level globals; the fitted model is assumed to arrive pickled:

import numpy as np
import pickle

feature_names = ...  # supply your list of feature names here
features = [feature_names[i] for i in range(len(feature_names))]
clf = pickle.loads(trained_model)  # trained_model: a pickled, fitted tree classifier
impurity = clf.tree_.impurity
importances = clf.feature_importances_

SqlOut = ""
#global Conts
global ContsNode
global Path
#Conts=[]
ContsNode = []
Path = []
global Results
Results = []
def print_decision_tree(tree, feature_names, offset_unit='    '):
    left = tree.tree_.children_left
    right = tree.tree_.children_right
    threshold = tree.tree_.threshold
    value = tree.tree_.value

    if feature_names is None:
        features = ['f%d' % i for i in tree.tree_.feature]
    else:
        features = [feature_names[i] for i in tree.tree_.feature]

    def recurse(left, right, threshold, features, node, depth=0, ParentNode=0, IsElse=0):
        global Conts
        global ContsNode
        global Path
        global Results
        global LeftParents
        LeftParents = []
        global RightParents
        RightParents = []
        for i in range(len(left)):  # one bookkeeping slot per node
            LeftParents.append(-1)
            RightParents.append(-1)
            ContsNode.append("")
            Path.append("")
        for i in range(len(left)):  # i is the node id
            if left[i] == -1 and right[i] == -1:  # leaf node
                if LeftParents[i] >= 0:
                    if Path[LeftParents[i]] > " ":  # parent already carries a path
                        Path[i] = Path[LeftParents[i]] + " AND " + ContsNode[LeftParents[i]]
                    else:
                        Path[i] = ContsNode[LeftParents[i]]
                if RightParents[i] >= 0:
                    if Path[RightParents[i]] > " ":
                        Path[i] = Path[RightParents[i]] + " AND not " + ContsNode[RightParents[i]]
                    else:
                        Path[i] = " not " + ContsNode[RightParents[i]]
                # doubled single quotes are intentional SQL string escaping
                Results.append(" case when " + Path[i] + " then ''" + "{:4d}".format(i)
                               + " " + "{:2.2f}".format(impurity[i]) + " " + Path[i][0:180] + "''")
            else:
                if LeftParents[i] >= 0:
                    if Path[LeftParents[i]] > " ":
                        Path[i] = Path[LeftParents[i]] + " AND " + ContsNode[LeftParents[i]]
                    else:
                        Path[i] = ContsNode[LeftParents[i]]
                if RightParents[i] >= 0:
                    if Path[RightParents[i]] > " ":
                        Path[i] = Path[RightParents[i]] + " AND not " + ContsNode[RightParents[i]]
                    else:
                        Path[i] = " not " + ContsNode[RightParents[i]]
                if left[i] != -1:
                    LeftParents[left[i]] = i
                if right[i] != -1:
                    RightParents[right[i]] = i
                ContsNode[i] = "( " + features[i] + " <= " + str(threshold[i]) + " ) "

    recurse(left, right, threshold, features, 0, 0, 0, 0)
print_decision_tree(clf, features)
SqlOut = ""
for i in range(len(Results)):
    SqlOut = SqlOut + Results[i] + " end," + chr(13) + chr(10)  # CRLF after each CASE
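The accumulated SqlOut string then holds one CASE expression per leaf; a small sketch of embedding it in a query, where the table name is a placeholder:

# Hypothetical: wrap the generated CASE columns in a SELECT (my_table is a placeholder).
print("SELECT\n" + SqlOut.rstrip(",\r\n") + "\nFROM my_table")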
import string
from sklearn.tree import export_text

def export_py_code(tree, feature_names, max_depth=100, spacing=4):
    if spacing < 2:
        raise ValueError('spacing must be > 1')

    # Clean up feature names (for correctness)
    nums = string.digits
    alnums = string.ascii_letters + nums
    clean = lambda s: ''.join(c if c in alnums else '_' for c in s)
    features = [clean(x) for x in feature_names]
    features = ['_' + x if x[0] in nums else x for x in features if x]
    if len(set(features)) != len(feature_names):
        raise ValueError('invalid feature names')

    # First: export tree to text
    res = export_text(tree, feature_names=features,
                      max_depth=max_depth,
                      decimals=6,
                      spacing=spacing - 1)

    # Second: generate Python code from the text
    skip, dash = ' ' * spacing, '-' * (spacing - 1)
    code = 'def decision_tree({}):\n'.format(', '.join(features))
    for line in repr(tree).split('\n'):
        code += skip + "# " + line + '\n'
    for line in res.split('\n'):
        line = line.rstrip().replace('|', ' ')
        if '<' in line or '>' in line:
            line, val = line.rsplit(maxsplit=1)
            line = line.replace(' ' + dash, 'if')
            line = '{} {:g}:'.format(line, float(val))
        else:
            line = line.replace(' {} class:'.format(dash), 'return')
        code += skip + line + '\n'
    return code
Example usage:

res = export_py_code(tree, feature_names=names, spacing=4)
print(res)

Sample output:
def decision_tree(f1, f2, f3):
    # DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=3,
    #                        max_features=None, max_leaf_nodes=None,
    #                        min_impurity_decrease=0.0, min_impurity_split=None,
    #                        min_samples_leaf=1, min_samples_split=2,
    #                        min_weight_fraction_leaf=0.0, presort=False,
    #                        random_state=42, splitter='best')
    if f1 <= 12.5:
        if f2 <= 17.5:
            if f1 <= 10.5:
                return 2
            if f1 > 10.5:
                return 3
        if f2 > 17.5:
            if f2 <= 22.5:
                return 1
            if f2 > 22.5:
                return 1
    if f1 > 12.5:
        if f1 <= 17.5:
            if f3 <= 23.5:
                return 2
            if f3 > 23.5:
                return 3
        if f1 > 17.5:
            if f1 <= 25:
                return 1
            if f1 > 25:
                return 2
The example above was generated with names = ['f'+str(j+1) for j in range(NUM_FEATURES)].
from numba import njit

@njit
def predict(X):
    # Hand-unrolled ensemble: each block below is one of 10 trees,
    # and the return value is the fraction of trees that vote 1.
    ret = 0
    # tree 1
    if X[0] <= 0.5:  # if w_pizza <= 0.5
        if X[1] <= 0.5:  # if w_mexico <= 0.5
            if X[2] <= 0.5:  # if w_reusable <= 0.5
                ret += 1
            else:  # if w_reusable > 0.5
                pass
        else:  # if w_mexico > 0.5
            ret += 1
    else:  # if w_pizza > 0.5
        pass
    # tree 2
    if X[0] <= 0.5:  # if w_pizza <= 0.5
        if X[1] <= 0.5:  # if w_mexico <= 0.5
            if X[2] <= 0.5:  # if w_reusable <= 0.5
                ret += 1
            else:  # if w_reusable > 0.5
                pass
        else:  # if w_mexico > 0.5
            pass
    else:  # if w_pizza > 0.5
        ret += 1
    # tree 3
    if X[0] <= 0.5:  # if w_pizza <= 0.5
        if X[1] <= 0.5:  # if w_mexico <= 0.5
            if X[2] <= 0.5:  # if w_reusable <= 0.5
                ret += 1
            else:  # if w_reusable > 0.5
                ret += 1
        else:  # if w_mexico > 0.5
            ret += 1
    else:  # if w_pizza > 0.5
        pass
    # tree 4
    if X[0] <= 0.5:  # if w_pizza <= 0.5
        if X[1] <= 0.5:  # if w_mexico <= 0.5
            if X[2] <= 0.5:  # if w_reusable <= 0.5
                ret += 1
            else:  # if w_reusable > 0.5
                ret += 1
        else:  # if w_mexico > 0.5
            pass
    else:  # if w_pizza > 0.5
        ret += 1
    # tree 5
    if X[0] <= 0.5:  # if w_pizza <= 0.5
        if X[1] <= 0.5:  # if w_mexico <= 0.5
            if X[2] <= 0.5:  # if w_reusable <= 0.5
                ret += 1
            else:  # if w_reusable > 0.5
                pass
        else:  # if w_mexico > 0.5
            pass
    else:  # if w_pizza > 0.5
        pass
    # tree 6
    if X[0] <= 0.5:  # if w_pizza <= 0.5
        if X[1] <= 0.5:  # if w_mexico <= 0.5
            if X[2] <= 0.5:  # if w_reusable <= 0.5
                ret += 1
            else:  # if w_reusable > 0.5
                pass
        else:  # if w_mexico > 0.5
            ret += 1
    else:  # if w_pizza > 0.5
        ret += 1
    # tree 7
    if X[0] <= 0.5:  # if w_pizza <= 0.5
        if X[1] <= 0.5:  # if w_mexico <= 0.5
            if X[2] <= 0.5:  # if w_reusable <= 0.5
                ret += 1
            else:  # if w_reusable > 0.5
                pass
        else:  # if w_mexico > 0.5
            pass
    else:  # if w_pizza > 0.5
        ret += 1
    # tree 8
    if X[0] <= 0.5:  # if w_pizza <= 0.5
        if X[1] <= 0.5:  # if w_mexico <= 0.5
            if X[2] <= 0.5:  # if w_reusable <= 0.5
                ret += 1
            else:  # if w_reusable > 0.5
                pass
        else:  # if w_mexico > 0.5
            pass
    else:  # if w_pizza > 0.5
        pass
    # tree 9
    if X[0] <= 0.5:  # if w_pizza <= 0.5
        if X[1] <= 0.5:  # if w_mexico <= 0.5
            if X[2] <= 0.5:  # if w_reusable <= 0.5
                ret += 1
            else:  # if w_reusable > 0.5
                pass
        else:  # if w_mexico > 0.5
            pass
    else:  # if w_pizza > 0.5
        pass
    # tree 10
    if X[0] <= 0.5:  # if w_pizza <= 0.5
        if X[1] <= 0.5:  # if w_mexico <= 0.5
            if X[2] <= 0.5:  # if w_reusable <= 0.5
                ret += 1
            else:  # if w_reusable > 0.5
                pass
        else:  # if w_mexico > 0.5
            pass
    else:  # if w_pizza > 0.5
        pass
    return ret / 10
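A quick sketch of calling the compiled predictor; the input vector is an assumed example of binary word-presence features:

import numpy as np

# Hypothetical input: [w_pizza, w_mexico, w_reusable] all absent.
x = np.array([0.0, 0.0, 0.0])
print(predict(x))  # fraction of the 10 trees voting 1; here 1.0, since every tree's all-zero leaf votes 1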
from sklearn import datasets
from sklearn.tree import DecisionTreeRegressor
from sklearn import tree

# Prepare the data (note: load_boston was removed in scikit-learn 1.2;
# on newer versions substitute another regression dataset)
boston = datasets.load_boston()
X = boston.data
y = boston.target

# Fit the regressor, set max_depth = 3
regr = DecisionTreeRegressor(max_depth=3, random_state=1234)
model = regr.fit(X, y)

# Print rules
rules = get_rules(regr, boston.feature_names, None)
for r in rules:
    print(r)
Printed rules:
if (RM <= 6.941) and (LSTAT <= 14.4) and (DIS > 1.385) then response: 22.905 | based on 250 samples
if (RM <= 6.941) and (LSTAT > 14.4) and (CRIM <= 6.992) then response: 17.138 | based on 101 samples
if (RM <= 6.941) and (LSTAT > 14.4) and (CRIM > 6.992) then response: 11.978 | based on 74 samples
if (RM > 6.941) and (RM <= 7.437) and (NOX <= 0.659) then response: 33.349 | based on 43 samples
if (RM > 6.941) and (RM > 7.437) and (PTRATIO <= 19.65) then response: 45.897 | based on 29 samples
if (RM <= 6.941) and (LSTAT <= 14.4) and (DIS <= 1.385) then response: 45.58 | based on 5 samples
if (RM > 6.941) and (RM <= 7.437) and (NOX > 0.659) then response: 14.4 | based on 3 samples
if (RM > 6.941) and (RM > 7.437) and (PTRATIO > 19.65) then response: 21.9 | based on 1 samples