0% found this document useful (0 votes)
8 views6 pages

ML-1

The document presents implementations of various machine learning algorithms including Artificial Neural Networks, Support Vector Classifiers, Random Forest Classifiers, and Clustering techniques using the Iris dataset. It demonstrates data preprocessing, model training, evaluation, and visualization of results. Key metrics such as accuracy and silhouette scores are calculated to assess model performance.

Uploaded by

govindan7707
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
8 views6 pages

ML-1

The document presents implementations of various machine learning algorithms including Artificial Neural Networks, Support Vector Classifiers, Random Forest Classifiers, and Clustering techniques using the Iris dataset. It demonstrates data preprocessing, model training, evaluation, and visualization of results. Key metrics such as accuracy and silhouette scores are calculated to assess model performance.

Uploaded by

govindan7707
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
You are on page 1/ 6

Artificial Neural Network

# --- Artificial Neural Network: one hidden layer trained by backpropagation ---
# Tiny demo: 3 samples with 2 features each, one continuous target scaled to [0, 1].
import numpy as np

x = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)  # toy input features
y = np.array(([92], [86], [89]), dtype=float)        # targets (percent scores)

# Scale inputs to [0, 1] so the sigmoid operates in its responsive range.
x = x / np.amax(x, axis=0)
y = y / 100


def sigmoid(z):
    """Logistic activation: squashes z into (0, 1)."""
    return 1 / (1 + np.exp(-z))


def derivatives_sigmoid(a):
    """Sigmoid derivative expressed in terms of its OUTPUT a = sigmoid(z): a*(1-a)."""
    return a * (1 - a)


epoch = 5000  # full passes over the training data
lr = 0.1      # learning rate
iln = 2       # input-layer neurons
hln = 3       # hidden-layer neurons
on = 1        # output neurons

# Random initialisation of weights and biases in [0, 1).
wh = np.random.uniform(size=(iln, hln))    # input -> hidden weights
bh = np.random.uniform(size=(1, hln))      # hidden-layer biases
wout = np.random.uniform(size=(hln, on))   # hidden -> output weights
bout = np.random.uniform(size=(1, on))     # output bias

for i in range(epoch):
    # ---- forward pass ----
    hinp = np.dot(x, wh) + bh
    hlayer_act = sigmoid(hinp)
    outinp = np.dot(hlayer_act, wout) + bout
    output = sigmoid(outinp)

    # ---- backward pass (gradient of squared error, chain rule) ----
    eo = y - output
    d_output = eo * derivatives_sigmoid(output)
    eh = d_output.dot(wout.T)
    d_hiddenlayer = eh * derivatives_sigmoid(hlayer_act)

    # ---- parameter update ----
    wout += hlayer_act.T.dot(d_output) * lr
    wh += x.T.dot(d_hiddenlayer) * lr
    # BUG FIX: the original never updated the biases, leaving bh/bout frozen
    # at their random initial values for the whole run. Sum gradients over
    # the sample axis to match each bias's (1, k) shape.
    bout += np.sum(d_output, axis=0, keepdims=True) * lr
    bh += np.sum(d_hiddenlayer, axis=0, keepdims=True) * lr

print(str(x))
print(str(y))
print(output)
Support Vector Classifier

# --- Support Vector Classifier on the Iris dataset ---
from sklearn.metrics import classification_report, accuracy_score
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

# Load the 4-feature, 3-class Iris data.
iris_bunch = load_iris()
features = iris_bunch.data
labels = iris_bunch.target

# Stratified 70/30 split preserves class proportions in both halves.
X_train, X_test, y_train, y_test = train_test_split(
    features, labels, test_size=0.3, random_state=42, stratify=labels
)

# Fit the scaler on the training split only, then apply it to both splits.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# RBF-kernel SVM; small C means strong regularisation.
classifier = SVC(kernel='rbf', C=0.1, gamma='scale', random_state=42)
classifier.fit(X_train, y_train)

predictions = classifier.predict(X_test)
print(classification_report(y_test, predictions))
print(accuracy_score(y_test, predictions))
Random Forest Classifier

# --- Random Forest Classifier on the Iris dataset ---
import pandas as pd
import numpy as np
from sklearn.metrics import classification_report, accuracy_score
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier

iris = load_iris()
feature_matrix, target = iris.data, iris.target

# 80/20 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    feature_matrix, target, test_size=0.2, random_state=42
)

# 100-tree forest, seeded so repeated runs give identical results.
forest = RandomForestClassifier(n_estimators=100, random_state=42)
forest.fit(X_train, y_train)

y_pred = forest.predict(X_test)
print(classification_report(y_test, y_pred))
print(accuracy_score(y_test, y_pred))

# Horizontal bar chart of the forest's impurity-based feature importances.
importances = forest.feature_importances_
plt.figure(figsize=(10, 6))
plt.barh(iris.feature_names, importances, color='skyblue')
plt.xlabel("feature importances")
plt.show()
Clustering

# --- Clustering the Iris dataset: KMeans vs Gaussian Mixture Model ---
import pandas as pd
import numpy as np
from sklearn.metrics import silhouette_score
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture

iris = load_iris()
x = pd.DataFrame(iris.data, columns=iris.feature_names)
y = iris.target

# Ground-truth class structure, viewed in sepal space and petal space.
plt.figure(figsize=(10, 6))
plt.subplot(1, 2, 1)
plt.scatter(x['sepal length (cm)'], x['sepal width (cm)'], c=y, s=40)
plt.title("actual sepal")
plt.subplot(1, 2, 2)
plt.scatter(x['petal length (cm)'], x['petal width (cm)'], c=y, s=40)
plt.title("actual petal")

# KMeans: hard assignment into 3 clusters.
# NOTE(review): no random_state is set, so cluster labelling (and possibly the
# silhouette scores) can vary between runs — confirm whether that is intended.
kmeans = KMeans(n_clusters=3)
kmeans.fit(x)
kmeans_clusters = kmeans.labels_

# Gaussian mixture: probabilistic model with 3 components; predict() gives
# the most likely component per sample.
gmm = GaussianMixture(n_components=3)
gmm.fit(x)
gmm_clusters = gmm.predict(x)

# Silhouette score in [-1, 1]: higher means tighter, better-separated clusters.
kmeans_silhouette = silhouette_score(x, kmeans_clusters)
gmm_silhouette = silhouette_score(x, gmm_clusters)
print("kmeans_silhouette :", kmeans_silhouette)
print("gmm_silhouette :", gmm_silhouette)

# Side-by-side comparison: true classes vs the two clusterings.
plt.figure(figsize=(10, 6))
plt.subplot(1, 3, 1)
plt.scatter(x['sepal length (cm)'], x['petal width (cm)'], c=y, s=40)
plt.title("actual classes")
plt.subplot(1, 3, 2)
plt.scatter(x['sepal length (cm)'], x['petal width (cm)'], c=kmeans_clusters, s=40)
plt.title("kmeans")
plt.subplot(1, 3, 3)
plt.scatter(x['sepal length (cm)'], x['petal width (cm)'], c=gmm_clusters, s=40)
plt.title("gmm")
plt.show()

# BUG FIX: in the original, the if/else bodies had lost their indentation,
# which is a SyntaxError in Python; structure restored here.
if kmeans_silhouette > gmm_silhouette:
    print("kmeans")
else:
    print("gmm")

You might also like