Aiml Lab Manual 2023

This lab manual walks through implementing uninformed graph search (BFS, DFS) and informed search (A*, memory-bounded A*) on sample graph data, a Naive Bayes classifier written from scratch, a Bayesian network for heart-disease inference built with pgmpy, and a simple linear regression model. Each exercise gives Python source code and the corresponding output.


EX.NO: 1 Implementation of Uninformed search algorithms (BFS, DFS)

GRAPH:

PROGRAM: BFS

graph = {
    '1': ['2', '3', '4'],
    '2': ['5', '6'],
    '3': [],
    '5': ['9', '10'],
    '4': ['7', '8'],
    '7': ['11', '12'],
    '6': [],
    '8': [],
    '9': [],
    '10': [],
    '11': [],
    '12': []
}

visited = []  # List for visited nodes.
queue = []    # Initialize a queue

def bfs(visited, graph, node):  # function for BFS
    visited.append(node)
    queue.append(node)

    while queue:  # loop to visit each node
        m = queue.pop(0)
        print(m, end=" ")

        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)

# Driver Code
print("Following is the Breadth-First Search")
bfs(visited, graph, '1')  # function calling; start at the root node '1'

OUTPUT:
Following is the Breadth-First Search
1 2 3 4 5 6 7 8 9 10 11 12
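
A note on the queue above: list.pop(0) shifts every remaining element, so each dequeue costs O(n). As a minimal sketch only (the name bfs_deque is illustrative, reusing the graph dictionary defined above), the same traversal with O(1) dequeues can use collections.deque:

from collections import deque

def bfs_deque(graph, start):
    visited = {start}               # a set makes membership tests O(1) as well
    queue = deque([start])          # deque gives O(1) append and popleft
    while queue:
        m = queue.popleft()
        print(m, end=" ")
        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.add(neighbour)
                queue.append(neighbour)

bfs_deque(graph, '1')   # prints the same order: 1 2 3 4 5 6 7 8 9 10 11 12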
GRAPH:

PROGRAM: DFS

# Using a Python dictionary to act as an adjacency list


graph = {
    '1': ['2', '3', '4'],
    '2': ['5', '6'],
    '3': [],
    '5': ['9', '10'],
    '4': ['7', '8'],
    '7': ['11', '12'],
    '6': [],
    '8': [],
    '9': [],
    '10': [],
    '11': [],
    '12': []
}

visited = set()  # Set to keep track of visited nodes of the graph.

def dfs(visited, graph, node):  # function for DFS
    if node not in visited:
        print(node)
        visited.add(node)
        for neighbour in graph[node]:
            dfs(visited, graph, neighbour)

# Driver Code
print("Following is the Depth-First Search")
dfs(visited, graph, '1')  # start at the root node '1'

OUTPUT:
Following is the Depth-First Search
1
2
5
9
10
6
3
4
7
11
12
8
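
The DFS above is recursive, so a very deep graph can hit Python's recursion limit. As a hedged sketch (the name dfs_iterative is illustrative, reusing the graph dictionary above), the same traversal can be written with an explicit stack:

def dfs_iterative(graph, start):
    visited = set()
    stack = [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            print(node)
            visited.add(node)
            # push neighbours in reverse so they are visited in list order
            stack.extend(reversed(graph[node]))

dfs_iterative(graph, '1')   # prints the same order as the recursive version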
EX.NO: 2 Implementation of Informed search algorithms (A*, memory-bounded A*)
GRAPH: A*

PROGRAM:
def aStarAlgo(start_node, stop_node):
    open_set = set([start_node])
    closed_set = set()
    g = {}        # store distance from starting node
    parents = {}  # parents contains an adjacency map of all nodes

    # distance of starting node from itself is zero
    g[start_node] = 0
    # start_node is the root node, i.e. it has no parent nodes,
    # so start_node is set as its own parent
    parents[start_node] = start_node

    while len(open_set) > 0:
        n = None
        # node with lowest f() = g() + heuristic() is found
        for v in open_set:
            if n is None or g[v] + heuristic(v) < g[n] + heuristic(n):
                n = v

        if n == stop_node or Graph_nodes[n] is None:
            pass
        else:
            for (m, weight) in get_neighbors(n):
                # nodes 'm' not in the open or closed set are added to open
                # and n is set as their parent
                if m not in open_set and m not in closed_set:
                    open_set.add(m)
                    parents[m] = n
                    g[m] = g[n] + weight
                # for each node m, compare its distance from start, i.e. g(m),
                # to the distance from start through node n
                else:
                    if g[m] > g[n] + weight:
                        # update g(m)
                        g[m] = g[n] + weight
                        # change parent of m to n
                        parents[m] = n
                        # if m is in the closed set, remove it and add to open
                        if m in closed_set:
                            closed_set.remove(m)
                            open_set.add(m)

        if n is None:
            print('Path does not exist!')
            return None

        # if the current node is the stop_node,
        # then we begin reconstructing the path from it to the start_node
        if n == stop_node:
            path = []
            while parents[n] != n:
                path.append(n)
                n = parents[n]
            path.append(start_node)
            path.reverse()
            print('Path found: {}'.format(path))
            return path

        # remove n from the open set and add it to the closed set,
        # because all of its neighbours have been inspected
        open_set.remove(n)
        closed_set.add(n)

    print('Path does not exist!')
    return None

# function to return a node's neighbours and their distances
def get_neighbors(v):
    if v in Graph_nodes:
        return Graph_nodes[v]
    else:
        return None

# heuristic function: estimated distance from each node to the goal
def heuristic(n):
    H_dist = {
        'A': 11,
        'B': 6,
        'C': 99,
        'D': 1,
        'E': 7,
        'G': 0,
    }
    return H_dist[n]

# Describe your graph here
Graph_nodes = {
    'A': [('B', 2), ('E', 3)],
    'B': [('A', 2), ('C', 1), ('G', 9)],
    'C': [('B', 1)],
    'D': [('E', 6), ('G', 1)],
    'E': [('A', 3), ('D', 6)],
    'G': [('B', 9), ('D', 1)]
}

aStarAlgo('A', 'G')

OUTPUT:

Path found: ['A', 'E', 'D', 'G']

['A', 'E', 'D', 'G']
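
The exercise title also lists memory-bounded A*, for which no program appears in this manual. As a minimal sketch of the idea only (not full SMA*; the function name and the max_nodes bound are illustrative, and it reuses heuristic and get_neighbors from the A* program above): keep the frontier as a priority queue ordered by f = g + h and, whenever it grows past a fixed size, discard the worst (highest-f) entries. This bounds memory at the possible cost of optimality.

import heapq

def memory_bounded_aStar(start_node, stop_node, max_nodes=8):
    # each frontier entry is (f, g, node, path-so-far)
    frontier = [(heuristic(start_node), 0, start_node, [start_node])]
    while frontier:
        f, g_cost, n, path = heapq.heappop(frontier)
        if n == stop_node:
            print('Path found: {}'.format(path))
            return path
        for (m, weight) in (get_neighbors(n) or []):
            if m not in path:  # avoid cycling along the current path
                g_new = g_cost + weight
                heapq.heappush(frontier,
                               (g_new + heuristic(m), g_new, m, path + [m]))
        # memory bound: keep only the max_nodes most promising entries
        if len(frontier) > max_nodes:
            frontier = heapq.nsmallest(max_nodes, frontier)
            heapq.heapify(frontier)
    print('Path does not exist!')
    return None

memory_bounded_aStar('A', 'G')

On the sample graph above this finds the same path, ['A', 'E', 'D', 'G'], since the frontier never outgrows the bound.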


EX.NO: 4 Implementing Naive Bayes algorithm from scratch using Python

PROGRAM:
# Importing libraries
import math
import random
import csv

# the categorical class names are changed to numeric data,
# e.g. yes and no encoded to 1 and 0
def encode_class(mydata):
    classes = []
    for i in range(len(mydata)):
        if mydata[i][-1] not in classes:
            classes.append(mydata[i][-1])
    for i in range(len(classes)):
        for j in range(len(mydata)):
            if mydata[j][-1] == classes[i]:
                mydata[j][-1] = i
    return mydata

# Splitting the data
def splitting(mydata, ratio):
    train_num = int(len(mydata) * ratio)
    train = []
    # initially the test set holds the whole dataset
    test = list(mydata)
    while len(train) < train_num:
        # index generated randomly from range 0 to length of test set
        index = random.randrange(len(test))
        # pop data rows from the test set and put them in train
        train.append(test.pop(index))
    return train, test

# Group the data rows under each class, yes or no,
# in a dictionary, e.g. dict[yes] and dict[no]
def groupUnderClass(mydata):
    dict = {}
    for i in range(len(mydata)):
        if (mydata[i][-1] not in dict):
            dict[mydata[i][-1]] = []
        dict[mydata[i][-1]].append(mydata[i])
    return dict

# Calculating Mean
def mean(numbers):
    return sum(numbers) / float(len(numbers))

# Calculating Standard Deviation
def std_dev(numbers):
    avg = mean(numbers)
    variance = sum([pow(x - avg, 2) for x in numbers]) / float(len(numbers) - 1)
    return math.sqrt(variance)

def MeanAndStdDev(mydata):
    info = [(mean(attribute), std_dev(attribute)) for attribute in zip(*mydata)]
    # e.g. list = [[a, b, c], [m, n, o], [x, y, z]]
    # here mean of 1st attribute = (a + m + x)/3, mean of 2nd attribute = (b + n + y)/3
    # delete the summary of the last column (the class label)
    del info[-1]
    return info

# find Mean and Standard Deviation under each class
def MeanAndStdDevForClass(mydata):
    info = {}
    dict = groupUnderClass(mydata)
    for classValue, instances in dict.items():
        info[classValue] = MeanAndStdDev(instances)
    return info

# Calculate Gaussian Probability Density Function
def calculateGaussianProbability(x, mean, stdev):
    expo = math.exp(-(math.pow(x - mean, 2) / (2 * math.pow(stdev, 2))))
    return (1 / (math.sqrt(2 * math.pi) * stdev)) * expo

# Calculate Class Probabilities
def calculateClassProbabilities(info, test):
    probabilities = {}
    for classValue, classSummaries in info.items():
        probabilities[classValue] = 1
        for i in range(len(classSummaries)):
            mean, std_dev = classSummaries[i]
            x = test[i]
            probabilities[classValue] *= calculateGaussianProbability(x, mean, std_dev)
    return probabilities

# Make prediction - the class with the highest probability is the prediction
def predict(info, test):
    probabilities = calculateClassProbabilities(info, test)
    bestLabel, bestProb = None, -1
    for classValue, probability in probabilities.items():
        if bestLabel is None or probability > bestProb:
            bestProb = probability
            bestLabel = classValue
    return bestLabel

# returns predictions for a set of examples
def getPredictions(info, test):
    predictions = []
    for i in range(len(test)):
        result = predict(info, test[i])
        predictions.append(result)
    return predictions

# Accuracy score
def accuracy_rate(test, predictions):
    correct = 0
    for i in range(len(test)):
        if test[i][-1] == predictions[i]:
            correct += 1
    return (correct / float(len(test))) * 100.0

# driver code

# add the data path in your system
filename = r'E:\user\MACHINE LEARNING\machine learning algos\Naive bayes\filedata.csv'

# load the file and store it in the mydata list
mydata = csv.reader(open(filename, "rt"))
mydata = list(mydata)
mydata = encode_class(mydata)
for i in range(len(mydata)):
    mydata[i] = [float(x) for x in mydata[i]]

# split ratio = 0.7
# 70% of the data is training data and 30% is test data used for testing
ratio = 0.7
train_data, test_data = splitting(mydata, ratio)
print('Total number of examples are: ', len(mydata))
print('Out of these, training examples are: ', len(train_data))
print("Test examples are: ", len(test_data))

# prepare model
info = MeanAndStdDevForClass(train_data)

# test model
predictions = getPredictions(info, test_data)
accuracy = accuracy_rate(test_data, predictions)
print("Accuracy of your model is: ", accuracy)
OUTPUT:
Total number of examples are: 200
Out of these, training examples are: 140
Test examples are: 60
Accuracy of your model is: 71.2376788
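
For reference, calculateGaussianProbability above evaluates the Gaussian (normal) probability density, which the classifier uses as the class-conditional likelihood of an attribute value x under a class whose attribute has mean μ and standard deviation σ:

P(x | class) = 1 / (√(2π) · σ) · exp(−(x − μ)² / (2σ²))

calculateClassProbabilities then multiplies these densities across all attributes, which is exactly the "naive" conditional-independence assumption that gives the algorithm its name.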
EX.NO: 5 Implement Bayesian Networks

PROGRAM:
import numpy as np
import pandas as pd
import csv
from pgmpy.estimators import MaximumLikelihoodEstimator
from pgmpy.models import BayesianModel
from pgmpy.inference import VariableElimination

heartDisease = pd.read_csv('heart.csv')
heartDisease = heartDisease.replace('?', np.nan)

print('Sample instances from the dataset are given below')
print(heartDisease.head())

print('\n Attributes and datatypes')
print(heartDisease.dtypes)

model = BayesianModel([('age', 'heartdisease'), ('sex', 'heartdisease'),
                       ('exang', 'heartdisease'), ('cp', 'heartdisease'),
                       ('heartdisease', 'restecg'), ('heartdisease', 'chol')])
print('\nLearning CPD using Maximum likelihood estimators')
model.fit(heartDisease, estimator=MaximumLikelihoodEstimator)

print('\n Inferencing with Bayesian Network:')
HeartDiseasetest_infer = VariableElimination(model)

print('\n 1. Probability of HeartDisease given evidence = restecg')
q1 = HeartDiseasetest_infer.query(variables=['heartdisease'], evidence={'restecg': 1})
print(q1)

print('\n 2. Probability of HeartDisease given evidence = cp')
q2 = HeartDiseasetest_infer.query(variables=['heartdisease'], evidence={'cp': 2})
print(q2)
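
Note: newer releases of pgmpy deprecate BayesianModel in favour of BayesianNetwork. If the import above fails on a recent installation, the following substitution (everything else unchanged) should be equivalent:

from pgmpy.models import BayesianNetwork
model = BayesianNetwork([('age', 'heartdisease'), ('sex', 'heartdisease'),
                         ('exang', 'heartdisease'), ('cp', 'heartdisease'),
                         ('heartdisease', 'restecg'), ('heartdisease', 'chol')])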

OUTPUT:
EX.NO: 6 Build Regression models

PROGRAM: LINEAR REGRESSION MODELS


import numpy as np
import matplotlib.pyplot as plt

def estimate_coef(x, y):
    # number of observations/points
    n = np.size(x)

    # mean of x and y vectors
    m_x = np.mean(x)
    m_y = np.mean(y)

    # calculating cross-deviation and deviation about x
    SS_xy = np.sum(y*x) - n*m_y*m_x
    SS_xx = np.sum(x*x) - n*m_x*m_x

    # calculating regression coefficients
    b_1 = SS_xy / SS_xx
    b_0 = m_y - b_1*m_x

    return (b_0, b_1)

def plot_regression_line(x, y, b):
    # plotting the actual points as a scatter plot
    plt.scatter(x, y, color="m", marker="o", s=30)

    # predicted response vector
    y_pred = b[0] + b[1]*x

    # plotting the regression line
    plt.plot(x, y_pred, color="g")

    # putting labels
    plt.xlabel('x')
    plt.ylabel('y')

    # function to show plot
    plt.show()

def main():
    # observations / data
    x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    y = np.array([1, 3, 2, 5, 7, 8, 8, 9, 10, 12])

    # estimating coefficients
    b = estimate_coef(x, y)
    print("Estimated coefficients:\nb_0 = {}\nb_1 = {}".format(b[0], b[1]))

    # plotting regression line
    plot_regression_line(x, y, b)

if __name__ == "__main__":
    main()

OUTPUT:
Estimated coefficients:
b_0 = 1.2363636363636363
b_1 = 1.1696969696969697
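
For reference, estimate_coef implements the ordinary least-squares closed form for fitting y ≈ b_0 + b_1·x:

b_1 = SS_xy / SS_xx = (Σ x_i·y_i − n·x̄·ȳ) / (Σ x_i² − n·x̄²)
b_0 = ȳ − b_1·x̄

where x̄ and ȳ are the sample means. Substituting the ten data points above reproduces the printed coefficients, b_1 ≈ 1.1697 and b_0 ≈ 1.2364.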
