Solutions to the Exercises: AI Python
Exercise 1.2: Take the following list and write a program that
prints out all the elements of the list that are less than 5.
a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
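A straightforward solution:

# print every element of the list that is smaller than 5
for element in a:
    if element < 5:
        print(element)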
Exercise 1.3: Take the following two lists and write a program that
returns a list that contains all elements of the lists (without
duplicates). Make sure your program works on two lists of
different sizes. Moreover, try to find a one-line solution (using sets).
import random
a = random.sample(range(1, 100), 5)
b = random.sample(range(1, 100), 12)
print(a)
print(b)
# function to merge and sort two lists (the requested one-liner is the core: sorted(set(x + y)))
def merge_and_sort_lists(x, y):
    merged_list = list(set(x + y))
    merged_list.sort()
    return merged_list

print(merge_and_sort_lists(a, b))
# merge three dictionaries (dic1, dic2, dic3 are defined in the exercise, not shown here)
dic4 = {}
for x in (dic1, dic2, dic3):
    dic4.update(x)
print(dic4)
# summary statistics of the 'open' column (df is the DataFrame loaded in the exercise, not shown here)
df['open'].describe()
Exercise 4.1: Build up the family tree given the following
description. Define the following relationships (always check whether 'x'
is related to 'y', e.g. is 'x' the father of 'y'): father(x,y) and
mother(x,y) as basic relationships; parent(x,y), grandparent(x,y),
sibling(x,y), uncleOrAunt(x,y).
# import libraries
import json
from kanren import Relation, facts, run, eq, membero, var, conde
# read in data from a JSON file (or create the knowledge base manually...)
with open('04_relationships.json') as f:
    d = json.loads(f.read())
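The definitions of the relations themselves are missing from this excerpt. A minimal sketch of how they could be set up with kanren, assuming d['father'] and d['mother'] are lists of single-entry {parent: child} dicts (an assumption about the JSON layout):

father = Relation()
mother = Relation()
for item in d['father']:
    facts(father, (list(item.keys())[0], list(item.values())[0]))
for item in d['mother']:
    facts(mother, (list(item.keys())[0], list(item.values())[0]))

# logic variable used by the queries below
x = var()

def parent(x, y):
    # a parent is a father or a mother
    return conde([father(x, y)], [mother(x, y)])

def grandparent(x, y):
    temp = var()
    return conde((parent(x, temp), parent(temp, y)))

def sibling(x, y):
    # shares a parent; note this also unifies x with y itself,
    # which is why the sibling query below filters out the person's own name
    temp = var()
    return conde((parent(temp, x), parent(temp, y)))

def uncleOrAunt(x, y):
    # a sibling of one of y's parents
    temp = var()
    return conde((parent(temp, y), sibling(x, temp)))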
# John's children
name = 'John'
output = run(0, x, father(name, x))
print("\nList of " + name + "'s children:")
for item in output:
    print(item)
# William's mother
name = 'William'
output = run(0, x, mother(x, name))[0]
print("\n" + name + "'s mother:\n" + output)
# Adam's parents
name = 'Adam'
output = run(0, x, parent(x, name))
print("\nList of " + name + "'s parents:")
for item in output:
    print(item)
# Wayne's grandparents
name = 'Wayne'
output = run(0, x, grandparent(x, name))
print("\nList of " + name + "'s grandparents:")
for item in output:
    print(item)
# David's siblings
name = 'David'
output = run(0, x, sibling(x, name))
siblings = [x for x in output if x != name]
print("\nList of " + name + "'s siblings:")
for item in siblings:
    print(item)
# All spouses
a, b, c = var(), var(), var()
output = run(0, (a, b), (father, a, c), (mother, b, c))
print("\nList of all spouses:")
for item in output:
    print(item)
Exercise 5.2: Run the four algorithms on the following graph (thanks
to the visited flag, the search techniques also work on graphs).
Analyze runtime and memory usage for the three uninformed
search algorithms.
Only shown for Depth-First Search! This can be applied to the other search
algorithms accordingly...
# import matplotlib and networkx
import matplotlib.pyplot as plt
import networkx as nx

ba = nx.barabasi_albert_graph(100, 5)
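The measurement itself is not included in this excerpt. A minimal sketch of how it could look for DFS on the generated graph, using time and tracemalloc; the iterative DFS with a visited set is an assumption standing in for the course's implementation:

import time
import tracemalloc

def dfs(graph, start):
    # iterative depth-first search; the visited set makes it safe on cyclic graphs
    visited, stack, order = set(), [start], []
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.add(node)
            order.append(node)
            stack.extend(n for n in graph.neighbors(node) if n not in visited)
    return order

tracemalloc.start()
start_time = time.perf_counter()
dfs(ba, 0)
runtime = time.perf_counter() - start_time
current, peak = tracemalloc.get_traced_memory()
tracemalloc.stop()
print("DFS: %.4f s, peak memory: %d bytes" % (runtime, peak))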
# from UCS to A*
class Graph:
    def __init__(self):
        self.edges = {
            'A': ['B', 'C'],
            'B': ['D', 'A'],
            'C': ['A'],
            'D': ['B', 'E'],
            'E': []
        }
        self.weights = {
            'AB': 0.4,
            'AC': 0.6,
            'BA': 0.2,
            'BD': 0.8,
            'DE': 0.5
        }
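The step from UCS to A* is the ordering of the frontier: UCS expands by path cost g(n), A* by f(n) = g(n) + h(n) with a heuristic h. A minimal sketch against the Graph class above; the heuristic values are made up for illustration, and missing weights fall back to 1.0 (an assumption):

import heapq

# hypothetical heuristic: estimated remaining cost to the goal 'E'
h = {'A': 1.0, 'B': 0.8, 'C': 1.5, 'D': 0.4, 'E': 0.0}

def a_star(graph, start, goal):
    # frontier ordered by f = g + h; with h = 0 everywhere this is exactly UCS
    frontier = [(h[start], 0.0, start, [start])]
    visited = set()
    while frontier:
        f, g, node, path = heapq.heappop(frontier)
        if node == goal:
            return path, g
        if node in visited:
            continue
        visited.add(node)
        for neighbor in graph.edges[node]:
            weight = graph.weights.get(node + neighbor, 1.0)
            heapq.heappush(frontier,
                           (g + weight + h[neighbor], g + weight, neighbor, path + [neighbor]))
    return None, float('inf')

print(a_star(Graph(), 'A', 'E'))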
Exercise 6.1: Alter the initial state of the 8-puzzle and the maze -
experiment with the two solvers! Find a configuration for each
example for which the task is not solvable. What happens?
Such scenarios simply lead to an error...
for y in range(len(self.board)):
    for x in range(len(self.board[y])):
        if self.board[y][x].lower() == "o":
            self.initial = (x, y)
        elif self.board[y][x].lower() == "x":
            self.goal = (x, y)

super(MazeSolver, self).__init__(initial_state=self.initial)
# end of actions(): return the list of legal moves
return actions

# from result(): actions are strings such as "up" or "down left",
# so count() checks whether a direction occurs in the action
if action.count("up"):
    y -= 1
if action.count("down"):
    y += 1
if action.count("left"):
    x -= 1
if action.count("right"):
    x += 1

new_state = (x, y)
return new_state
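The helper bayes_theorem is called below but not defined in this excerpt. A sketch of the standard form, computing P(A|B) = P(B|A) * P(A) / P(B) with P(B) expanded via the law of total probability:

def bayes_theorem(p_a, p_b_given_a, p_b_given_not_a):
    # P(B) via the law of total probability
    p_b = p_b_given_a * p_a + p_b_given_not_a * (1 - p_a)
    # P(A|B) = P(B|A) * P(A) / P(B)
    return (p_b_given_a * p_a) / p_b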
# P(A)
p_a = 0.0002
# P(B|A)
p_b_given_a = 0.85
# P(B|not A)
p_b_given_not_a = 0.05
# calculate P(A|B)
result = bayes_theorem(p_a, p_b_given_a, p_b_given_not_a)
# summarize the result as a percentage
print('P(A|B) = %.3f%%' % (result * 100))
# likelihoods
# P(light | sleep)
light_sleep = 0.01
# P(-light | sleep)
nolight_sleep = 0.99
# P(light | -sleep)
light_nosleep = 0.8
# P(-light | -sleep)
nolight_nosleep = 0.2
# P(phone | sleep)
phone_sleep = 0.95
# P(-phone | sleep)
nophone_sleep = 0.05
# P(phone | -sleep)
phone_nosleep = 0.25
# P(-phone | -sleep)
nophone_nosleep = 0.75
# select the light likelihoods matching the observed evidence
if light == 0:
    l_likelihood = nolight_sleep
    l_non_likelihood = nolight_nosleep
    l_status = 'OFF'
elif light == 1:
    l_likelihood = light_sleep
    l_non_likelihood = light_nosleep
    l_status = 'ON'

# select the phone likelihoods matching the observed evidence
if phone == 0:
    p_likelihood = nophone_sleep
    p_non_likelihood = nophone_nosleep
    p_status = 'NOT charging'
elif phone == 1:
    p_likelihood = phone_sleep
    p_non_likelihood = phone_nosleep
    p_status = 'charging'
if type(time_offset) == int:
    # pd.datetime has been removed from pandas; pd.Timestamp is the replacement
    time = pd.Timestamp(2017, 1, 1, 10, 0, 0)
    new_time = str((time + pd.DateOffset(minutes=time_offset)).time())
return conditional_probability
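add_update_probability itself is not shown either. A sketch consistent with the calls below, assuming the first argument (time_est, computed earlier in the notebook) is the prior probability of being asleep at the estimated time, and that the two pieces of evidence are treated as conditionally independent:

def add_update_probability(prior, light, phone):
    # pick the likelihoods matching the observed evidence
    l_likelihood = light_sleep if light else nolight_sleep
    l_non_likelihood = light_nosleep if light else nolight_nosleep
    p_likelihood = phone_sleep if phone else nophone_sleep
    p_non_likelihood = phone_nosleep if phone else nophone_nosleep
    # Bayes' rule with a naive (conditional independence) assumption
    numerator = prior * l_likelihood * p_likelihood
    denominator = numerator + (1 - prior) * l_non_likelihood * p_non_likelihood
    return numerator / denominator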
light_phone = add_update_probability(time_est, 1, 1)
nolight_phone = add_update_probability(time_est, 0, 1)
light_nophone = add_update_probability(time_est, 1, 0)
nolight_nophone = add_update_probability(time_est, 0, 0)
# set the default figure size (figsize comes from IPython.core.pylabtools)
from IPython.core.pylabtools import figsize
figsize(18, 8)
Exercise 8.1: Make use of a Bayesian Network for the Monty Hall
problem!
# Import required packages;
# requires: pip install pomegranate
import math
from pomegranate import *
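The guest's initial choice and the prize door are independent and uniform over the three doors; these two distributions are referenced by the conditional probability table below but are missing from the excerpt. Following pomegranate's Monty Hall example:

guest = DiscreteDistribution({'A': 1./3, 'B': 1./3, 'C': 1./3})
prize = DiscreteDistribution({'A': 1./3, 'B': 1./3, 'C': 1./3})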
# The door Monty picks depends on the choice of the guest and the prize door
monty = ConditionalProbabilityTable(
[[ 'A', 'A', 'A', 0.0 ],
[ 'A', 'A', 'B', 0.5 ],
[ 'A', 'A', 'C', 0.5 ],
[ 'A', 'B', 'A', 0.0 ],
[ 'A', 'B', 'B', 0.0 ],
[ 'A', 'B', 'C', 1.0 ],
[ 'A', 'C', 'A', 0.0 ],
[ 'A', 'C', 'B', 1.0 ],
[ 'A', 'C', 'C', 0.0 ],
[ 'B', 'A', 'A', 0.0 ],
[ 'B', 'A', 'B', 0.0 ],
[ 'B', 'A', 'C', 1.0 ],
[ 'B', 'B', 'A', 0.5 ],
[ 'B', 'B', 'B', 0.0 ],
[ 'B', 'B', 'C', 0.5 ],
[ 'B', 'C', 'A', 1.0 ],
[ 'B', 'C', 'B', 0.0 ],
[ 'B', 'C', 'C', 0.0 ],
[ 'C', 'A', 'A', 0.0 ],
[ 'C', 'A', 'B', 1.0 ],
[ 'C', 'A', 'C', 0.0 ],
[ 'C', 'B', 'A', 1.0 ],
[ 'C', 'B', 'B', 0.0 ],
[ 'C', 'B', 'C', 0.0 ],
[ 'C', 'C', 'A', 0.5 ],
[ 'C', 'C', 'B', 0.5 ],
[ 'C', 'C', 'C', 0.0 ]], [guest, prize] )
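The construction of the network is also missing before the bake() call; following the same pomegranate example, it presumably looks like this:

# wrap the distributions in nodes and wire up guest -> monty <- prize
s1 = Node(guest, name="guest")
s2 = Node(prize, name="prize")
s3 = Node(monty, name="monty")
network = BayesianNetwork("Monty Hall Problem")
network.add_states(s1, s2, s3)
network.add_edge(s1, s3)
network.add_edge(s2, s3)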
network.bake()
# What are the odds (A, B, C) for both players if the guest decides for door B?
beliefs = network.predict_proba({ 'guest' : 'B' })
beliefs = map(str, beliefs)
print("\n".join( "{}\t{}".format( state.name, belief ) for state, belief in zip( network.states,
beliefs ) ))
# What are the odds (A, B, C) for both players if the guest decides for door A and Monty for door B?
beliefs = network.predict_proba({'guest' : 'A', 'monty' : 'B'})
print("\n".join( "{}\t{}".format( state.name, str(belief) ) for state, belief in zip( network.states,
beliefs )))
# this is how we realize the Bayesian network: raining -> parking space available <- working day
from pgmpy.models import BayesianModel
model = BayesianModel([('raining', 'available'), ('working', 'available')])
# plot it
import networkx as nx
%matplotlib inline
nx.draw(model)
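twospirals is used below (and again in the Exercise 11.x solutions) but never defined in this excerpt. A common implementation that matches its use here, generating two interleaved spirals with labels 0 and 1 (the exact noise model is an assumption):

import numpy as np

def twospirals(n_points, noise=0.5):
    # generate two interleaved spirals with n_points each
    n = np.sqrt(np.random.rand(n_points, 1)) * 780 * (2 * np.pi) / 360
    d1x = -np.cos(n) * n + np.random.rand(n_points, 1) * noise
    d1y = np.sin(n) * n + np.random.rand(n_points, 1) * noise
    X = np.vstack((np.hstack((d1x, d1y)), np.hstack((-d1x, -d1y))))
    y = np.hstack((np.zeros(n_points), np.ones(n_points)))
    return X, y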
X, y = twospirals(1000)
plt.title('training set')
plt.plot(X[y==0,0], X[y==0,1], '.', label='hot dog')
plt.plot(X[y==1,0], X[y==1,1], '.', label='not hot dog')
plt.legend()
plt.show()
# we set k to 7
n_neighbors = 7
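The excerpt stops after choosing k. A sketch of fitting and scoring the classifier; scikit-learn's KNeighborsClassifier is an assumption, the course may use a different implementation:

from sklearn.neighbors import KNeighborsClassifier

clf = KNeighborsClassifier(n_neighbors=n_neighbors)
clf.fit(X, y)
print("training accuracy: %.3f" % clf.score(X, y))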
Exercise 9.2: Train, evaluate and visualize a decision tree for the
following dataset.
# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
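The actual solution code is largely missing here. A sketch of training, evaluating, and visualizing a tree with scikit-learn; the file name data.txt, its comma-separated layout, and the split parameters are assumptions:

from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.model_selection import train_test_split

# hypothetical: the last column holds the class label
data = np.loadtxt('data.txt', delimiter=',')
features, labels = data[:, :-1], data[:, -1]
X_train, X_test, y_train, y_test = train_test_split(
    features, labels, test_size=0.25, random_state=5)

classifier = DecisionTreeClassifier(random_state=0)
classifier.fit(X_train, y_train)
print("test accuracy: %.3f" % classifier.score(X_test, y_test))

plt.figure()
plot_tree(classifier, filled=True)
plt.show()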
classifier.predict(features)
scores.append(score)
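The scores.append(score) above is the tail of a loop that evaluates different cluster counts. A sketch of that loop, assuming k-means clustering over the exercise's data X:

from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

scores = []
values = np.arange(2, 10)
for num_clusters in values:
    kmeans = KMeans(n_clusters=num_clusters, n_init=10, random_state=0)
    kmeans.fit(X)
    score = silhouette_score(X, kmeans.labels_)
    scores.append(score)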
# Plot silhouette scores
plt.figure()
plt.bar(values, scores, width=0.7, color='black', align='center')
plt.title('Silhouette score vs number of clusters')
plt.show()
def sigmoid(x):
    return 1.0 / (1 + np.exp(-x))

# note: the derivatives below take the already-activated value,
# e.g. sigmoid_derivative(sigmoid(z)) = sigmoid(z) * (1 - sigmoid(z))
def sigmoid_derivative(x):
    return x * (1.0 - x)

def linear(x):
    return x

def linear_derivative(x):
    return 1

def tanH(x):
    return np.tanh(x)

def tanH_derivative(x):
    # x is the already tanh-activated value (same convention as sigmoid_derivative)
    return 1 - x**2
class NeuralNetwork:
    def __init__(self, x, y, hidden_neurons, act, act_derivative):
        self.input = x
        self.weights1 = np.random.rand(self.input.shape[1], hidden_neurons)
        self.weights2 = np.random.rand(hidden_neurons, 1)
        self.y = y
        self.output = np.zeros(self.y.shape)
        self.act = act
        self.act_derivative = act_derivative

    def feedforward(self):
        self.layer1 = self.act(np.dot(self.input, self.weights1))
        self.output = self.act(np.dot(self.layer1, self.weights2))

    def backprop(self):
        # application of the chain rule to find the derivative of the loss
        # function with respect to weights2 and weights1
        d_weights2 = np.dot(self.layer1.T,
                            (2 * (self.y - self.output) * self.act_derivative(self.output)))
        # the hidden layer uses the same activation, so apply its derivative here as well
        d_weights1 = np.dot(self.input.T,
                            (np.dot(2 * (self.y - self.output) * self.act_derivative(self.output),
                                    self.weights2.T) * self.act_derivative(self.layer1)))
        # update the weights with the derivative (slope) of the loss function
        self.weights1 += d_weights1
        self.weights2 += d_weights2

    def print_debug(self, i):
        # hypothetical helper (referenced below but not shown in the excerpt):
        # report the mean squared error at iteration i
        print("iteration %d, loss %.6f" % (i, np.mean((self.y - self.output) ** 2)))
# input vectors
X = np.array([[0,0,1],
[1,1,0],
[1,0,1],
[0,1,1],
[1,1,1]])
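The labels y and the helper run_tests are referenced below but not shown. A sketch consistent with those calls; the label values, hidden layer size, and iteration count are assumptions:

# hypothetical labels for the five input vectors above
y = np.array([[0], [1], [1], [0], [1]])

def run_tests(act, act_derivative):
    nn = NeuralNetwork(X, y, hidden_neurons=4, act=act, act_derivative=act_derivative)
    for i in range(1500):
        nn.feedforward()
        nn.backprop()
    print(nn.output)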
# inside the training loop: report intermediate state after 100 iterations
if i == 100:
    nn.print_debug(i)
# ...and report once more after training has finished
nn.print_debug(i)
print("\n\nSigmoid:")
run_tests(sigmoid, sigmoid_derivative)
print("\n\nLinear:")
run_tests(linear, linear_derivative)
print("\n\ntanH:")
run_tests(tanH, tanH_derivative)
plt.title('training set')
plt.plot(X[y==0,0], X[y==0,1], '.', label='hot dog')
plt.plot(X[y==1,0], X[y==1,1], '.', label='not hot dog')
plt.legend()
plt.show()
y = y.reshape((X.shape[0], 1))
plt.show()
Exercise 11.3: Build a single layer neural network and train it with
the data from the last exercise (X = input, y = labels). What do you
observe?
# Define minimum and maximum values for each dimension
dim1_min, dim1_max, dim2_min, dim2_max = min(X[:, 0]), max(X[:, 0]), min(X[:, 1]), max(X[:, 1])
y = y.reshape((X.shape[0], 1))
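The network definition and training are missing from the excerpt. A sketch of a single-layer perceptron with neurolab (epoch count and learning rate are assumptions); the observation is that the error stays high, since a single layer can only draw a linear decision boundary and cannot separate the two spirals:

import neurolab as nl

# single-layer perceptron: two inputs, one output neuron
nn = nl.net.newp([[dim1_min, dim1_max], [dim2_min, dim2_max]], 1)
error_progress = nn.train(X, y, epochs=100, show=20, lr=0.03)

plt.figure()
plt.plot(error_progress)
plt.xlabel('Number of epochs')
plt.ylabel('Training error')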
plt.show()
Exercise 11.4: Figure out how to change the learning rate and
examine whether you can improve the training (e.g. fewer oscillation
effects in the error rate).
import numpy as np
import matplotlib.pyplot as plt
import neurolab as nl
plt.title('Input data')
plt.show()
X, y = twospirals(1000)
plt.title('training set')
plt.plot(X[y==0,0], X[y==0,1], '.', label='hot dog')
plt.plot(X[y==1,0], X[y==1,1], '.', label='not hot dog')
plt.legend()
plt.show()
y = y.reshape((X.shape[0], 1))
# Define a multilayer neural network with 3 hidden layers (seems to work best);
# dim1 and dim2 are the [min, max] ranges of the two input dimensions
nn = nl.net.newff([dim1, dim2], [10, 20, 4, num_output])
# This training algorithm, based on scipy.optimize, seems to give the best results
nn.trainf = nl.train.train_cg
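train_cg wraps a scipy.optimize routine and exposes no learning rate. To experiment with the learning rate as the exercise asks, one option is to switch to plain gradient descent, which accepts lr (the concrete values here are assumptions):

# alternative training setup for the learning-rate experiment
nn.trainf = nl.train.train_gd
error_progress = nn.train(X, y, epochs=2000, show=100, goal=0.01, lr=0.005)
plt.figure()
plt.plot(error_progress)
plt.xlabel('Number of epochs')
plt.ylabel('Training error')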
plt.show()