IrisFlowerDataset.py
from pandas import read_csv
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
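# Load the Iris dataset from the remote CSV and print basic summaries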
URL = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/iris.csv"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = read_csv(URL, names=names)
print(dataset.shape)
print(dataset.head(20))
print(dataset.describe())
print(dataset.groupby('class').size())
# Univariate plots: box-and-whisker plot for each attribute
dataset.plot(kind='box', subplots=True, layout=(2, 2), sharex=False, sharey=False)
pyplot.show()
# Univariate plots: histogram of each attribute
dataset.hist()
pyplot.show()
# Multivariate plots: scatter matrix of all attribute pairs
scatter_matrix(dataset)
pyplot.show()
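# Separate the feature columns from the class label, then hold out 20% of the rows as a validation set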
array = dataset.values
X = array[:, 0:4]
Y = array[:, 4]
X_train, X_validation, Y_train, Y_validation = train_test_split(X, Y, test_size=0.2, random_state=1)
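# Spot-check a mix of linear and non-linear classification algorithms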
models = []
models.append(('LR', LogisticRegression(solver='liblinear', multi_class='ovr')))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC(gamma='auto')))
results = []
names = []
# Evaluate each model's accuracy with 10-fold stratified cross-validation on the training data
for name, model in models:
    kfold = StratifiedKFold(n_splits=10)
    cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring='accuracy')
    results.append(cv_results)
    names.append(name)
    print('%s: %f (%f)' % (name, cv_results.mean(), cv_results.std()))
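# Compare the spread of cross-validation scores across the algorithms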
pyplot.boxplot(results, labels=names)
pyplot.title("Algorithm Comparison")
pyplot.show()
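# Fit the chosen model (SVM) on the full training set and evaluate it on the held-out validation set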
model = SVC(gamma='auto')
model.fit(X_train, Y_train)
pred = model.predict(X_validation)
print(accuracy_score(Y_validation, pred))
print(confusion_matrix(Y_validation, pred))
print(classification_report(Y_validation, pred))