#!/usr/bin/env python
"""
Example of using the hierarchical classifier to classify (a subset of) the digits data set.
Demonstrated some of the capabilities, e.g using a Pipeline as the base estimator,
defining a non-trivial class hierarchy, etc.
"""
from sklearn import svm
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline

from sklearn_hierarchical_classification.classifier import HierarchicalClassifier
from sklearn_hierarchical_classification.constants import ROOT
from sklearn_hierarchical_classification.metrics import h_fbeta_score, multi_labeled
from sklearn_hierarchical_classification.tests.fixtures import make_digits_dataset

# Used for seeding random state
RANDOM_STATE = 42


def classify_digits():
    r"""Test that a nontrivial hierarchy leaf classification behaves as expected.

    We build the following class hierarchy along with data from the handwritten digits dataset:

            <ROOT>
           /      \
          A        B
         / \      / \
        1   7    C   9
                / \
               3   8

    """
    class_hierarchy = {
        ROOT: ["A", "B"],
        "A": ["1", "7"],
        "B": ["C", "9"],
        "C": ["3", "8"],
    }
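    # Base estimator used at each node: project the 64 pixel features down to
    # 24 components, then classify with an RBF-kernel SVM. probability=True
    # exposes predict_proba, which the hierarchical classifier can use when
    # deciding which branch to follow.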
    base_estimator = make_pipeline(
        TruncatedSVD(n_components=24),
        svm.SVC(
            gamma=0.001,
            kernel="rbf",
            probability=True,
        ),
    )
    clf = HierarchicalClassifier(
        base_estimator=base_estimator,
        class_hierarchy=class_hierarchy,
    )
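    # Load only the digit classes that appear as leaves of the hierarchy above.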
    X, y = make_digits_dataset(
        targets=[1, 7, 3, 8, 9],
        as_str=False,
    )
    # Cast the targets to strings so we have consistent typing of labels across the hierarchy
    y = y.astype(str)
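    # Hold out 20% of the samples for evaluation.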
    X_train, X_test, y_train, y_test = train_test_split(
        X,
        y,
        test_size=0.2,
        random_state=RANDOM_STATE,
    )
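    # Fitting trains a local classifier at each non-leaf node of the hierarchy;
    # prediction then walks the tree from ROOT down to a leaf label.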
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)

    print("Classification Report:\n", classification_report(y_test, y_pred))
    # Demonstrate using our hierarchical metrics module with the multi-label binarizer wrapper
    with multi_labeled(y_test, y_pred, clf.graph_) as (y_test_, y_pred_, graph_):
        h_fbeta = h_fbeta_score(
            y_test_,
            y_pred_,
            graph_,
        )
        print("h_fbeta_score: ", h_fbeta)


if __name__ == "__main__":
    classify_digits()