ML Project - Getting Started
#importing the dependencies
from pandas import read_csv
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
#Data pre-processing
# loading the data
url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/iris.csv"
names = ['sepal-length','sepal-width', 'petal-length','petal-width','class']
dataset = read_csv(url, names = names)
# dimension of the dataset
print(dataset.shape)
# take a peek at the data
print(dataset.head(20))
# statistical summary
print(dataset.describe())
# class distribution
print(dataset.groupby('class').size())
#Visualising the Data
# uni-variate plots - box and whisker plots
dataset.plot(kind = 'box', subplots = True, layout = (2,2), sharex = False , sharey = False)
pyplot.show()
# histogram of the variable
dataset.hist()
pyplot.show()
# multivariate plots
scatter_matrix(dataset)
pyplot.show()
#Dataset Splitting
# creating a validation dataset
# splitting dataset
array = dataset.values
x = array[:,0:4]
y = array[:,4]
x_train, x_validation, y_train, y_validation = train_test_split(x, y, test_size = 0.2, random_state = 1)
#Modelling
# Logistic Regression
# Linear Discriminant Analysis
# K-Nearest Neighbors
# Classification and Regression Trees
# Gaussian Naive Bayes
# Support Vector Machines
# building models
models = []
models.append(('LR', LogisticRegression(solver = 'liblinear', multi_class = 'ovr')))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC(gamma = 'auto')))
# evaluate the created models
results = []
names = []
for name, model in models:
    kfold = StratifiedKFold(n_splits = 10, shuffle = True, random_state = 1)
    cv_results = cross_val_score(model, x_train, y_train, cv = kfold, scoring = 'accuracy')
    results.append(cv_results)
    names.append(name)
    print('%s: %f (%f)' % (name, cv_results.mean(), cv_results.std()))
# compare our models
pyplot.boxplot(results, labels = names)
pyplot.title("Algorithm Comparison")
pyplot.show()
# make predictions on svm
model = SVC(gamma = 'auto')
model.fit(x_train, y_train)
predictions = model.predict(x_validation)
# evaluate our predictions
print('Accuracy Score:', accuracy_score(y_validation, predictions))
print('Confusion Matrix:\n', confusion_matrix(y_validation, predictions))
print('Classification Report:\n', classification_report(y_validation, predictions))
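# The VotingClassifier imported above is never used in the walkthrough.
# A minimal sketch (an assumed extension, not part of the original script) of how it
# could combine the same models compared earlier into a hard-voting ensemble:
ensemble = VotingClassifier(estimators = models, voting = 'hard')
ensemble.fit(x_train, y_train)
ensemble_predictions = ensemble.predict(x_validation)
print('Ensemble Accuracy Score:', accuracy_score(y_validation, ensemble_predictions))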