AIML Lab Programs Overview

This document collects AIML laboratory programs: graph search algorithms (breadth-first search, depth-first search, A* search, and memory-bounded A* search), a naive Bayes classifier built from class priors, likelihoods, and predictor priors, Bayesian network construction and inference with pgmpy, linear, polynomial, and multiple regression, decision trees, support vector machines, ensemble methods, k-means clustering, a two-layer neural network for XOR, and a Keras model for MNIST digit classification.

AIML ECE ALL LAB PROGRAMS

1 a)
class Graph:
def __init__(self, vertices):
self.V = vertices
self.adj = [[] for i in range(vertices)]

def add_edge(self, u, v):


self.adj[u].append(v)

def bfs(self, start):


visited = [False] * self.V
queue = []
queue.append(start)
visited[start] = True
while queue:
node = queue.pop(0)
print(node, end=" ")
for neighbor in self.adj[node]:
if not visited[neighbor]:
visited[neighbor] = True
queue.append(neighbor)

g = Graph(6)
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 3)
g.add_edge(1, 4)
g.add_edge(2, 4)
g.add_edge(3, 4)
g.add_edge(3, 5)
g.add_edge(4, 5)
print("BFS:")
g.bfs(0)
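Expected output for the sample graph above (the visit order follows the order in which edges were added):

BFS:
0 1 2 3 4 5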

-----------------------------------------------------------------------------------
-------------------------------------
1 b)
class Graph:
def __init__(self, vertices):
self.V = vertices
self.adj = [[] for i in range(vertices)]

def add_edge(self, u, v):


self.adj[u].append(v)

def dfs(self, start):


visited = [False] * self.V
stack = []
stack.append(start)
visited[start] = True
while stack:
node = stack.pop()
print(node, end=" ")
for neighbor in self.adj[node]:
if not visited[neighbor]:
visited[neighbor] = True
stack.append(neighbor)
g = Graph(6)
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 3)
g.add_edge(1, 4)
g.add_edge(2, 4)
g.add_edge(3, 4)
g.add_edge(3, 5)
g.add_edge(4, 5)
print("DFS:")
g.dfs(0)
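Expected output for the same graph (this stack-based DFS expands the most recently pushed neighbour first):

DFS:
0 2 4 5 1 3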
-----------------------------------------------------------------------------------
-------------------------------------
2 a)
import heapq

class Node:
def __init__(self, state, parent=None, g=0, h=0):
self.state = state
self.parent = parent
self.g = g
self.h = h

def f_score(self):
return self.g + self.h

def __lt__(self, other):


return self.f_score() < other.f_score()

def __eq__(self, other):


return self.state == other.state

class Graph:
def __init__(self, vertices, edges):
self.vertices = vertices
self.edges = edges

def neighbors(self, node):


return [Node(state) for state in self.edges.get(node.state, [])]

def astar_search(graph, start, goal, heuristic):


start_node = Node(start)
goal_node = Node(goal)
open_list = []
closed_list = set()
heapq.heappush(open_list, (start_node.f_score(), start_node))

while open_list:
current_node = heapq.heappop(open_list)[1]

if current_node.state == goal_node.state:
path = []
while current_node:
path.append(current_node.state)
current_node = current_node.parent
return list(reversed(path))

closed_list.add(current_node.state)
for neighbor in graph.neighbors(current_node):
if neighbor.state in closed_list:
continue

new_g = current_node.g + 1
new_h = heuristic(neighbor.state, goal)
new_f = new_g + new_h

if any(item[1].state == neighbor.state for item in open_list):


if new_g < neighbor.g:
neighbor.g = new_g
neighbor.parent = current_node
else:
neighbor.g = new_g
neighbor.h = new_h
neighbor.parent = current_node
heapq.heappush(open_list, (new_f, neighbor))

return None

vertices = ['A', 'B', 'C', 'D', 'E']


edges = {'A': ['B', 'C'],
'B': ['A', 'D'],
'C': ['A', 'D', 'E'],
'D': ['B', 'C', 'E'],
'E': ['C', 'D']}
graph = Graph(vertices, edges)
start = 'A'
goal = 'E'
heuristic = lambda n, goal: 1 # Example heuristic function, always returns 1

print("A* Search:")
print(astar_search(graph, start, goal, heuristic))
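Note: because the example heuristic always returns 1, the f-score orders nodes by path cost alone, so the search behaves like uniform-cost search; for this graph the printed path is ['A', 'C', 'E']. An informed heuristic, such as the Manhattan distance used in program 2 b), would be needed to see A* actually prune the search.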
-----------------------------------------------------------------------------------
-------------------------------------
2 b)
import heapq

# Define the Node class


class Node:
def __init__(self, state, g_value, h_value, parent):
self.state = state
self.g_value = g_value
self.h_value = h_value
self.parent = parent

def __lt__(self, other):


return (self.g_value + self.h_value) < (other.g_value + other.h_value)

# Define the heuristic function


def heuristic(state, goal):
return abs(state[0] - goal[0]) + abs(state[1] - goal[1])

# Define the MA* search function


def ma_star_search(start, goal, memory_limit):
open_list = []
closed_set = set()
heapq.heappush(open_list, Node(start, 0, heuristic(start, goal), None))
while open_list:
current = heapq.heappop(open_list)
if current.state == goal:
path = []
while current:
path.append(current.state)
current = current.parent
return list(reversed(path))
closed_set.add(current.state)
for successor in get_successors(current.state):
if successor in closed_set:
continue
g_value = current.g_value + 1
h_value = heuristic(successor, goal)
new_node = Node(successor, g_value, h_value, current)
            if len(open_list) < memory_limit:
                heapq.heappush(open_list, new_node)
            else:
                # memory bound reached: drop the stored node with the largest f-value
                max_node = max(open_list)
                if new_node < max_node:
                    open_list.remove(max_node)
                    heapq.heapify(open_list)  # restore the heap invariant after the removal
                    heapq.heappush(open_list, new_node)
                    closed_set.discard(max_node.state)  # discard() avoids a KeyError if the state was never expanded
return None

# Define the get_successors function


def get_successors(state):
successors = []
x, y = state
if x > 0:
successors.append((x-1, y))
if x < 2:
successors.append((x+1, y))
if y > 0:
successors.append((x, y-1))
if y < 2:
successors.append((x, y+1))
return successors

# Define the start and goal


start = (0, 0)
goal = (2, 2)

# Run the MA* search algorithm


path = ma_star_search(start, goal, 3)

# Print the path


print("MA* Search Path:", path)
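Note: the memory_limit argument bounds the number of nodes kept on the open list; once the limit is reached, the node with the largest f-value is dropped, which keeps memory bounded but can cost optimality. With a larger limit (e.g. 10) the pruning branch is rarely taken on this 3x3 grid and the search behaves like ordinary A*.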
-----------------------------------------------------------------------------------
-------------------------------------
3 )
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math

def accuracy_score(y_true, y_pred):
    """ score = sum(y_true == y_pred) / len(y_true), returned as a percentage """
    return round(float(sum(y_pred == y_true)) / float(len(y_true)) * 100, 2)

def pre_processing(df):
    """ partitioning data into features and target """
    X = df.drop([df.columns[-1]], axis=1)
    y = df[df.columns[-1]]
    return X, y

class NaiveBayes:
def __init__(self):
self.features = list
self.likelihoods = {}
self.class_priors = {}
self.pred_priors = {}
self.X_train = np.array
self.y_train = np.array
self.train_size = int
self.num_feats = int

    def fit(self, X, y):
        self.features = list(X.columns)
        self.X_train = X
        self.y_train = y
        self.train_size = X.shape[0]
        self.num_feats = X.shape[1]

        # initialise the probability tables for every feature value / outcome pair
        for feature in self.features:
            self.likelihoods[feature] = {}
            self.pred_priors[feature] = {}
            for feat_val in np.unique(self.X_train[feature]):
                self.pred_priors[feature].update({feat_val: 0})
                for outcome in np.unique(self.y_train):
                    self.likelihoods[feature].update({feat_val + '_' + outcome: 0})
                    self.class_priors.update({outcome: 0})

        self._calc_class_prior()
        self._calc_likelihoods()
        self._calc_predictor_prior()

def _calc_class_prior(self):
""" P(c) - Prior Class Probability """
for outcome in np.unique(self.y_train):
outcome_count = sum(self.y_train == outcome)
self.class_priors[outcome] = outcome_count / self.train_size

    def _calc_likelihoods(self):
        """ P(x|c) - Likelihood """
        for feature in self.features:
            for outcome in np.unique(self.y_train):
                outcome_count = sum(self.y_train == outcome)
                feat_likelihood = self.X_train[feature][
                    self.y_train[self.y_train == outcome].index.values.tolist()
                ].value_counts().to_dict()
                for feat_val, count in feat_likelihood.items():
                    self.likelihoods[feature][feat_val + '_' + outcome] = count / outcome_count

def _calc_predictor_prior(self):
""" P(x) - Evidence """
for feature in self.features:
feat_vals = self.X_train[feature].value_counts().to_dict()
for feat_val, count in feat_vals.items():
self.pred_priors[feature][feat_val] = count/self.train_size
def predict(self, X):
""" Calculates Posterior probability P(c|x) """
results = []
X = np.array(X)
for query in X:
probs_outcome = {}
for outcome in np.unique(self.y_train):
prior = self.class_priors[outcome]
likelihood = 1
evidence = 1
for feat, feat_val in zip(self.features, query):
likelihood *= self.likelihoods[feat][feat_val + '_' + outcome]
evidence *= self.pred_priors[feat][feat_val]
posterior = (likelihood * prior) / (evidence)
probs_outcome[outcome] = posterior
result = max(probs_outcome, key=lambda x: probs_outcome[x])
results.append(result)
return np.array(results)

if __name__ == "__main__":
# Weather Dataset
print("\nWeather Dataset:")
    # Specify the path to your file in Google Drive (Drive must already be mounted)
    df = pd.read_csv("/content/drive/MyDrive/TENNISDATACV/tennisdata.csv")
# Split features and target
X, y = pre_processing(df)
nb_clf = NaiveBayes()
nb_clf.fit(X, y)
print("Train Accuracy: {}".format(accuracy_score(y, nb_clf.predict(X))))
# Query 1:
query = np.array([['Rainy','Mild', 'Normal', 't']])
print("Query 1:- {} ---> {}".format(query, nb_clf.predict(query)))
# Query 2:
query = np.array([['Overcast','Cool', 'Normal', 't']])
print("Query 2:- {} ---> {}".format(query, nb_clf.predict(query)))
# Query 3:
query = np.array([['Sunny','Hot', 'High', 't']])
print("Query 3:- {} ---> {}".format(query, nb_clf.predict(query)))
-----------------------------------------------------------------------------------
-------------------------------------
4 )
from pgmpy.models import BayesianModel
from pgmpy.factors.discrete import TabularCPD

# Defining the model structure. We can define the network by just passing a list of edges.
model = BayesianModel([('D', 'G'), ('I', 'G'), ('G', 'L'), ('I', 'S')])

# Defining individual CPDs.


cpd_d = TabularCPD(variable='D', variable_card=2, values=[[0.6], [0.4]])
cpd_i = TabularCPD(variable='I', variable_card=2, values=[[0.7], [0.3]])
# represents P(grade|diff, intel)
cpd_g = TabularCPD(variable='G', variable_card=3,
values=[[0.3, 0.05, 0.9, 0.5],
[0.4, 0.25, 0.08, 0.3],
[0.3, 0.7, 0.02, 0.2]],
evidence=['I', 'D'],
evidence_card=[2, 2])
cpd_l = TabularCPD(variable='L', variable_card=2,
values=[[0.1, 0.4, 0.99],
[0.9, 0.6, 0.01]],
evidence=['G'],
evidence_card=[3])
cpd_s = TabularCPD(variable='S', variable_card=2,
values=[[0.95, 0.2],
[0.05, 0.8]],
evidence=['I'],
evidence_card=[2])

# Associating the CPDs with the network


model.add_cpds(cpd_d, cpd_i, cpd_g, cpd_l, cpd_s)

# check_model() verifies the network structure and the CPDs, and checks that the CPDs are correctly defined and sum to 1.
model.check_model()
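Once check_model() passes, the network can also be queried; a minimal sketch (assuming the same pgmpy version as above) using variable elimination to get the distribution of G given evidence on D and I:

from pgmpy.inference import VariableElimination

infer = VariableElimination(model)
# state names default to 0..card-1 because none were specified in the CPDs
print(infer.query(variables=['G'], evidence={'D': 1, 'I': 1}))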
-----------------------------------------------------------------------------------
-------------------------------------
5 a)
import matplotlib.pyplot as plt
from scipy import stats

x = [89, 43, 36, 36, 95, 10, 66, 34, 38, 20, 26, 29, 48, 64, 6, 5, 36, 66, 72, 40]
y = [21, 46, 3, 35, 67, 95, 53, 72, 58, 10, 26, 34, 90, 33, 38, 20, 56, 2, 47, 15]

slope, intercept, r, p, std_err = stats.linregress(x, y)

def myfunc(x):
return slope * x + intercept

mymodel = list(map(myfunc, x))

plt.scatter(x, y)
plt.plot(x, mymodel)
plt.show()
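The r value returned by linregress is the correlation coefficient, so printing it gives a quick measure of how well the fitted line matches this scattered data:

print("r =", r)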
-----------------------------------------------------------------------------------
-------------------------------------
5b)
import numpy
import matplotlib.pyplot as plt

x = [1,2,3,5,6,7,8,9,10,12,13,14,15,16,18,19,21,22]
y = [100,90,80,60,60,55,60,65,70,70,75,76,78,79,90,99,99,100]

mymodel = numpy.poly1d(numpy.polyfit(x, y, 3))


myline = numpy.linspace(1, 22, 100)

plt.scatter(x, y)
plt.plot(myline, mymodel(myline))
plt.show()
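A quick goodness-of-fit check for the polynomial model, as a small sketch using sklearn's r2_score (an extra import beyond the program above):

from sklearn.metrics import r2_score
print("R2 score:", r2_score(y, mymodel(x)))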
-----------------------------------------------------------------------------------
-------------------------------------
5c)
import pandas as pd
from sklearn import linear_model

# Load data from CSV file


file_path = '/content/drive/My Drive/TENNISDATACV/cars.csv'
df = pd.read_csv(file_path)

# Separate features (X) and target variable (y)


X = df[['Weight', 'Volume']]
y = df['CO2']

# Train linear regression model


regr = linear_model.LinearRegression()
regr.fit(X, y)

# Predict CO2 emission for a car with weight 2300kg and volume 1300cm3
predictedCO2 = regr.predict([[2300, 1300]])
print(predictedCO2)
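Printing the learned coefficients shows how much the predicted CO2 changes per unit increase in Weight and Volume respectively:

print("Coefficients:", regr.coef_)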

-----------------------------------------------------------------------------------
-------------------------------------
6)
import sys
import matplotlib
matplotlib.use('Agg')
import pandas as pd
from sklearn.tree import DecisionTreeClassifier, plot_tree
import matplotlib.pyplot as plt
from google.colab import drive

# Mount Google Drive


drive.mount('/content/drive')

# Specify the file path


file_path = '/content/drive/My Drive/data.csv'

df = pd.read_csv(file_path)

# Mapping categorical values to numerical values


d = {'UK': 0, 'USA': 1, 'N': 2}
df['Nationality'] = df['Nationality'].map(d)

d = {'YES': 1, 'NO': 0}
df['Go'] = df['Go'].map(d)

features = ['Age', 'Experience', 'Rank', 'Nationality']


X = df[features]
y = df['Go']

dtree = DecisionTreeClassifier()
dtree = dtree.fit(X, y)

# Visualize decision tree


plt.figure(figsize=(12, 8))
plot_tree(dtree, feature_names=features, class_names=['NO', 'YES'], filled=True)
plt.savefig('/content/drive/My Drive/decision_tree.png')
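The fitted tree can also classify a new case; a short sketch with hypothetical values in the same feature order ['Age', 'Experience', 'Rank', 'Nationality']:

print(dtree.predict([[40, 10, 7, 1]]))  # e.g. 40 years old, 10 years of experience, rank 7, USA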
-----------------------------------------------------------------------------------
-------------------------------------
7)
# Step 1: Load the dataset
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
y = iris.target
# Step 2: Preprocess the data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Step 3: Split the data into training and testing sets


from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.3, random_state=42)

# Step 4: Define the SVM model


from sklearn.svm import SVC
svm_model = SVC(kernel='linear', C=1.0)

# Step 5: Train the SVM model


svm_model.fit(X_train, y_train)

# Step 6: Test the SVM model


y_pred = svm_model.predict(X_test)
from sklearn.metrics import accuracy_score
print("Accuracy: ", accuracy_score(y_test, y_pred))

# Step 7: Tune the hyperparameters


from sklearn.model_selection import GridSearchCV
param_grid = {'C': [0.1, 1, 10], 'kernel': ['linear', 'rbf']}
grid_search = GridSearchCV(SVC(), param_grid, cv=5)
grid_search.fit(X_train, y_train)

print("Best parameters: ", grid_search.best_params_)
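As a follow-up, the refit best estimator from the grid search can be scored on the held-out test set and compared against the baseline accuracy from Step 6:

print("Best model test accuracy:", grid_search.best_estimator_.score(X_test, y_test))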


-----------------------------------------------------------------------------------
-------------------------------------
8)
from sklearn.ensemble import VotingClassifier, BaggingClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Load the iris dataset


data = load_iris()

# Split the data into training and testing sets


X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, test_size=0.3, random_state=42)

# Define the base models


model1 = DecisionTreeClassifier(random_state=42)
model2 = LogisticRegression(random_state=42)
model3 = KNeighborsClassifier()

# Define the ensemble models


ensemble1 = BaggingClassifier(base_estimator=model1, n_estimators=10, random_state=42)
ensemble2 = AdaBoostClassifier(base_estimator=model2, n_estimators=10, random_state=42)
ensemble3 = GradientBoostingClassifier(n_estimators=10, random_state=42)

# Define the voting classifier


voting_clf = VotingClassifier(estimators=[('bagging', ensemble1), ('adaboost', ensemble2), ('gradient_boosting', ensemble3)], voting='hard')

# Train the voting classifier


voting_clf.fit(X_train, y_train)

# Make predictions on the test set


y_pred = voting_clf.predict(X_test)

# Calculate the accuracy of the ensemble model


accuracy = accuracy_score(y_test, y_pred)
print('Accuracy: %.2f' % accuracy)
-----------------------------------------------------------------------------------
-------------------------------------
9)
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

# Define the data points


x = [4, 5, 10, 4, 3, 11, 14, 6, 10, 12]
y = [21, 19, 24, 17, 16, 25, 24, 22, 21, 21]
data = list(zip(x, y))

# Calculate inertia for different number of clusters


inertias = []
for i in range(1, 11):
    kmeans = KMeans(n_clusters=i, n_init=10)  # explicitly set n_init to suppress the warning
    kmeans.fit(data)
    inertias.append(kmeans.inertia_)

# Plot the Elbow Method


plt.plot(range(1, 11), inertias, marker='o')
plt.title('Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('Inertia')
plt.show()
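After reading the elbow off the plot (for these points the bend appears around k = 2), the final clustering can be fitted and visualised; a short sketch:

kmeans = KMeans(n_clusters=2, n_init=10)
kmeans.fit(data)
plt.scatter(x, y, c=kmeans.labels_)
plt.title('K-Means clustering (k = 2)')
plt.show()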
-----------------------------------------------------------------------------------
-------------------------------------
10)
import numpy as np
import pandas as pd
from pgmpy.models import BayesianModel
from pgmpy.estimators import MaximumLikelihoodEstimator
from pgmpy.inference import VariableElimination
from google.colab import drive

# Mount Google Drive and load the dataset
drive.mount('/content/drive')
heartDisease = pd.read_csv('/content/drive/My Drive/TENNISDATACV/heart.csv')
heartDisease = heartDisease.replace('?', np.nan)

# Display a few examples from the dataset


print('Few examples from the dataset are given below:')
print(heartDisease.head())
# Define the structure of the Bayesian Network
model = BayesianModel([
    ('age', 'trestbps'), ('age', 'fbs'), ('sex', 'trestbps'), ('exang', 'trestbps'),
    ('trestbps', 'target'), ('fbs', 'target'), ('target', 'restecg'),
    ('target', 'thalach'), ('target', 'chol')
])

# Learn Conditional Probability Distributions (CPDs) using Maximum Likelihood Estimation
print('\nLearning CPD using Maximum likelihood estimators...')
model.fit(heartDisease, estimator=MaximumLikelihoodEstimator)

# Perform inference with Bayesian Network


HeartDisease_infer = VariableElimination(model)

# Query 1: Probability of heart disease given age = 57
print('\n1. Probability of Heart Disease given Age=57')
q = HeartDisease_infer.query(variables=['target'], evidence={'age': 57})
print(q.values)

# Query 2: Probability of heart disease given cholesterol = 192
print('\n2. Probability of Heart Disease given cholesterol=192')
q = HeartDisease_infer.query(variables=['target'], evidence={'chol': 192})
print(q.values)

-----------------------------------------------------------------------------------
-------------------------------------
11)
import numpy as np

def sigmoid(z):
return 1/(1+np.exp(-z))

def initialize_parameters(n_x, n_h, n_y):


np.random.seed(2)
w1 = np.random.randn(n_h, n_x)
b1 = np.zeros((n_h, 1))
w2 = np.random.randn(n_y, n_h)
b2 = np.zeros((n_y, 1))
parameters = {"w1": w1, "b1": b1, "w2": w2, "b2": b2}
return parameters

def forward_prop(x, parameters):


w1 = parameters["w1"]
b1 = parameters["b1"]
w2 = parameters["w2"]
b2 = parameters["b2"]
z1 = np.dot(w1, x) + b1
a1 = np.tanh(z1)
z2 = np.dot(w2, a1) + b2
a2 = sigmoid(z2)
cache = {"a1": a1, "a2": a2}
return a2, cache

def calculate_cost(a2, y):


m = y.shape[1] # number of examples
cost = -np.sum(np.multiply(y, np.log(a2)) + np.multiply(1-y, np.log(1-a2)))/m
cost = np.squeeze(cost)
return cost
def backward_prop(x, y, cache, parameters):
m = x.shape[1]
a1 = cache["a1"]
a2 = cache["a2"]
w2 = parameters["w2"]
dz2 = a2 - y
dw2 = np.dot(dz2, a1.T) / m
db2 = np.sum(dz2, axis=1, keepdims=True) / m
dz1 = np.multiply(np.dot(w2.T, dz2), 1 - np.power(a1, 2))
dw1 = np.dot(dz1, x.T) / m
db1 = np.sum(dz1, axis=1, keepdims=True) / m
grads = {"dw1": dw1, "db1": db1, "dw2": dw2, "db2": db2}
return grads

def update_parameters(parameters, grads, learning_rate):


w1 = parameters["w1"]
b1 = parameters["b1"]
w2 = parameters["w2"]
b2 = parameters["b2"]
dw1 = grads["dw1"]
db1 = grads["db1"]
dw2 = grads["dw2"]
db2 = grads["db2"]
w1 = w1 - learning_rate * dw1
b1 = b1 - learning_rate * db1
w2 = w2 - learning_rate * dw2
b2 = b2 - learning_rate * db2
new_parameters = {"w1": w1, "b1": b1, "w2": w2, "b2": b2}
return new_parameters

def model(x, y, n_x, n_h, n_y, num_of_iters, learning_rate):


parameters = initialize_parameters(n_x, n_h, n_y)
for i in range(0, num_of_iters+1):
a2, cache = forward_prop(x, parameters)
cost = calculate_cost(a2, y)
grads = backward_prop(x, y, cache, parameters)
parameters = update_parameters(parameters, grads, learning_rate)
if i % 100 == 0:
print('Cost after iteration {:d}: {:f}'.format(i, cost))
return parameters

def predict(x, parameters):


a2, cache = forward_prop(x, parameters)
yhat = a2
yhat = np.squeeze(yhat)
if yhat >= 0.5:
y_predict = 1
else:
y_predict = 0
return y_predict

x = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])


y = np.array([[0, 1, 1, 0]])
m = x.shape[1]
n_x = 2
n_h = 2
n_y = 1
num_of_iters = 1000
learning_rate = 0.3

trained_parameters = model(x, y, n_x, n_h, n_y, num_of_iters, learning_rate)

x_test = np.array([[1], [0]])


y_predict = predict(x_test, trained_parameters)
print("Neural Network prediction for example ({:d}, {:d}) is {:d}".format(x_test[0][0], x_test[1][0], y_predict))
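To check that the network has learned the full XOR function, the trained parameters can be evaluated on all four input combinations (a short sketch reusing predict):

for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    print("({}, {}) -> {}".format(a, b, predict(np.array([[a], [b]]), trained_parameters)))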
-----------------------------------------------------------------------------------
-------------------------------------
12)
import tensorflow as tf
from tensorflow import keras

# Load the data


(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

# Preprocess the data


x_train = x_train.reshape((x_train.shape[0], 28 * 28)).astype('float32') / 255
x_test = x_test.reshape((x_test.shape[0], 28 * 28)).astype('float32') / 255
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)

# Define the model architecture


model = keras.models.Sequential([
keras.layers.Dense(128, activation='relu', input_shape=(28 * 28,)),
keras.layers.Dropout(0.5),
keras.layers.Dense(10, activation='softmax')
])

# Compile the model


model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model


model.fit(x_train, y_train, epochs=10, batch_size=32, validation_data=(x_test, y_test))

# Evaluate the model


test_loss, test_acc = model.evaluate(x_test, y_test)
print('Test accuracy:', test_acc)
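A quick sanity check on a single test image, comparing the predicted digit with its one-hot label (an extra numpy import beyond the program above):

import numpy as np
probs = model.predict(x_test[:1])
print("Predicted digit:", np.argmax(probs), "- actual digit:", np.argmax(y_test[0]))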

-----------------------------------------------------------------------------------
-------------------------------------
