Soft Computing

This document contains a set of soft-computing laboratory assignments: breadth-first search, depth-first search, and A* search implemented in C; the N-Queens problem solved with backtracking recursion; a step-by-step artificial neural network implementation; a simple image-processing program; an ADALINE network for pattern recognition; a backpropagation network for time-series forecasting; a Hopfield network for associative recall; and a case study on NETtalk.


Sushila Devi Bansal College Of Technology, Indore

CLASS WORK
SESSIONAL WORK

ASSIGNMENT NO.
EXPERIMENT NO.

SUBMITTED ON.. MARKS OR GRADE OBTAINED


NAME..ROLL NO..
CLASS DEPARTMENT.
SUBJECT CODE NO..

Signature of Student

1. WAP to implement breadth first search algorithm in C.


/*Program that implements breadth first search algorithm*/
#include <stdio.h>
#include <stdlib.h>
#include <conio.h>
#define TRUE 1
#define FALSE 0
#define MAX 8
struct node
{
int data ;
struct node *next ;
};
int visited[MAX] ;
int q[MAX] ;
int front, rear ;
void bfs ( int, struct node ** ) ;
struct node * getnode_write ( int ) ;
void addqueue ( int ) ;
int deletequeue( ) ;
int isempty( ) ;
void del ( struct node * ) ;
void main( )
{
struct node *arr[MAX] ;
struct node *v1, *v2, *v3, *v4 ;
int i ;
clrscr( ) ;
v1 = getnode_write ( 2 ) ;
arr[0] = v1 ;
v1 -> next = v2 = getnode_write ( 3 ) ;
v2 -> next = NULL ;
v1 = getnode_write ( 1 ) ;
arr[1] = v1 ;
v1 -> next = v2 = getnode_write ( 4 ) ;


v2 -> next = v3 = getnode_write ( 5 ) ;


v3 -> next = NULL ;
v1 = getnode_write ( 1 ) ;
arr[2] = v1 ;
v1 -> next = v2 = getnode_write ( 6 ) ;
v2 -> next = v3 = getnode_write ( 7 ) ;
v3 -> next = NULL ;
v1 = getnode_write ( 2 ) ;
arr[3] = v1 ;
v1 -> next = v2 = getnode_write ( 8 ) ;
v2 -> next = NULL ;
v1 = getnode_write ( 2 ) ;
arr[4] = v1 ;
v1 -> next = v2 = getnode_write ( 8 ) ;
v2 -> next = NULL ;
v1 = getnode_write ( 3 ) ;
arr[5] = v1 ;
v1 -> next = v2 = getnode_write ( 8 ) ;
v2 -> next = NULL ;
v1 = getnode_write ( 3 ) ;
arr[6] = v1 ;
v1 -> next = v2 = getnode_write ( 8 ) ;
v2 -> next = NULL ;
v1 = getnode_write ( 4 ) ;
arr[7] = v1 ;
v1 -> next = v2 = getnode_write ( 5 ) ;
v2 -> next = v3 = getnode_write ( 6 ) ;
v3 -> next = v4 = getnode_write ( 7 ) ;
v4 -> next = NULL ;
front = rear = -1 ;
bfs ( 1, arr ) ;
for ( i = 0 ; i < MAX ; i++ )
del ( arr[i] ) ;
getch( ) ;
}
void bfs ( int v, struct node **p )
{
struct node *u ;
visited[v - 1] = TRUE ;
printf ( "%d\t", v ) ;
addqueue ( v ) ;
while ( isempty( ) == FALSE )
{
v = deletequeue( ) ;
u=*(p+v-1);
while ( u != NULL )
{
if ( visited [ u -> data - 1 ] == FALSE )
{
addqueue ( u -> data ) ;
visited [ u -> data - 1 ] = TRUE ;

printf ( "%d\t", u -> data ) ;


}
u = u -> next ;
}
}
}
struct node * getnode_write ( int val )
{
struct node *newnode ;
newnode = ( struct node * ) malloc ( sizeof ( struct node ) ) ;
newnode -> data = val ;
newnode -> next = NULL ; /* callers may overwrite, but initialise for safety */
return newnode ;
}
void addqueue ( int vertex )
{
if ( rear == MAX - 1 )
{
printf ( "\nQueue Overflow." ) ;
exit ( 1 ) ;
}
rear++ ;
q[rear] = vertex ;
if ( front == -1 )
front = 0 ;
}
int deletequeue( )
{
int data ;
if ( front == -1 )
{
printf ( "\nQueue Underflow." ) ;
exit ( 1 ) ;
}
data = q[front] ;
if ( front == rear )
front = rear = -1 ;
else
front++ ;
return data ;
}
int isempty( )
{
if ( front == -1 )
return TRUE ;
return FALSE ;
}
void del ( struct node *n )
{
struct node *temp ;
while ( n != NULL )
{
temp = n -> next ;

free ( n ) ;
n = temp ;
}
}

Sushila Devi Bansal College Of Technology, Indore


CLASS WORK
SESSIONAL WORK

ASSIGNMENT NO.
EXPERIMENT NO.

SUBMITTED ON.. MARKS OR GRADE OBTAINED


NAME..ROLL NO..
CLASS DEPARTMENT.
SUBJECT CODE NO..

Signature of Student

2. WAP to implement depth first search algorithm in C.


#include <stdio.h>
#include <stdlib.h>
#include <conio.h>
typedef struct node {
int value;
struct node *right;
struct node *left;
} mynode;
mynode *root;
void add_node(int value);
void levelOrderTraversal(mynode *root);
int main(int argc, char* argv[]) {
root = NULL;
add_node(5);
add_node(1);
add_node(-20);
add_node(100);
add_node(23);
add_node(67);
add_node(13);
printf("\n\n\nLEVEL ORDER TRAVERSAL\n\n");
levelOrderTraversal(root);
getch();
return 0;
}
// Function to add a new node...
void add_node(int value) {
mynode *prev, *cur, *temp;
temp = malloc(sizeof(mynode));
temp->value = value;
temp->right = NULL;
temp->left = NULL;
if(root == NULL) {
printf("\nCreating the root..\n");
root = temp;
return;
}


prev = NULL;
cur = root;
while(cur != NULL) {
prev = cur;
//cur = (value < cur->value) ? cur->left:cur->right;
if(value < cur->value) {
cur = cur->left;
} else {
cur = cur->right;
}
}
if(value < prev->value) {
prev->left = temp;
} else {
prev->right = temp;
}
}
// Level order traversal..
void levelOrderTraversal(mynode *root) {
mynode *queue[100] = {(mynode *)0}; // Important to initialize!
int size = 0;
int queue_pointer = 0;
while(root) {
printf("[%d] ", root->value);
if(root->left) {
queue[size++] = root->left;
}
if(root->right) {
queue[size++] = root->right;
}
root = queue[queue_pointer++];
}
}

Sushila Devi Bansal College Of Technology, Indore


CLASS WORK
SESSIONAL WORK

ASSIGNMENT NO.
EXPERIMENT NO.

SUBMITTED ON.. MARKS OR GRADE OBTAINED


NAME..ROLL NO..
CLASS DEPARTMENT.
SUBJECT CODE NO..

Signature of Student

3. WAP to implement the A* search algorithm.


PriorityQueue OpenList
List ClosedList
startNode.g = 0
startNode.h = EstimateCostToEndNode(startNode)
startNode.f = startNode.g + startNode.h
startNode.parent = null
OpenList.Insert(startNode)
while (OpenList is not empty)
    // obtain the topmost (lowest-f) element from the priority queue
    Node node = OpenList.GetNode()
    if (node == endNode)
        return TRUE
    for (each neighbour _succ of node)
        newG = node.g + CalcCostFromNodeToNode(_succ, node)
        if (_succ is on OpenList or ClosedList and newG >= _succ.g)
            continue with the next neighbour // the old path is at least as good
        else
            _succ.parent = node
            _succ.g = newG
            _succ.h = EstimateCostToEndNode(_succ)
            _succ.f = _succ.g + _succ.h
            if (_succ is already on ClosedList)
                remove _succ from ClosedList
            if (_succ isn't on OpenList yet)
                add _succ to OpenList
    ClosedList.Insert(node)
return FALSE
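
The pseudocode above is language-neutral. As a minimal C sketch of the same idea, the program below searches a small hard-coded graph; the edge costs and heuristic values are illustrative assumptions, and the open list is scanned linearly instead of using a real priority queue.

#include <stdio.h>

#define NODES 6
#define INF 1000000

/* Illustrative edge costs (INF = no edge) and an assumed admissible
   heuristic: an estimated cost from each node to the goal node 5. */
int cost[NODES][NODES];
int heuristic[NODES] = { 7, 6, 2, 1, 2, 0 };

int g[NODES], parent[NODES];
int openset[NODES], closedset[NODES];

int astar(int start, int goal)
{
    int i, n, succ, best, newG;
    for (i = 0; i < NODES; i++) { g[i] = INF; parent[i] = -1; }
    g[start] = 0;
    openset[start] = 1;
    for (;;) {
        /* pick the open node with the smallest f = g + h (linear scan) */
        n = -1; best = INF;
        for (i = 0; i < NODES; i++)
            if (openset[i] && g[i] + heuristic[i] < best) {
                best = g[i] + heuristic[i];
                n = i;
            }
        if (n == -1) return 0;          /* open list empty: no path */
        if (n == goal) return 1;
        openset[n] = 0; closedset[n] = 1;
        for (succ = 0; succ < NODES; succ++) {
            if (cost[n][succ] == INF) continue;
            newG = g[n] + cost[n][succ];
            if ((openset[succ] || closedset[succ]) && newG >= g[succ])
                continue;               /* old path is at least as good */
            parent[succ] = n;
            g[succ] = newG;
            closedset[succ] = 0;        /* reopen if it was closed */
            openset[succ] = 1;
        }
    }
}

int main(void)
{
    int i, j, v;
    for (i = 0; i < NODES; i++)
        for (j = 0; j < NODES; j++)
            cost[i][j] = INF;
    /* a few illustrative edges */
    cost[0][1] = 2; cost[0][2] = 4;
    cost[1][3] = 5; cost[2][3] = 1;
    cost[3][4] = 3; cost[3][5] = 6; cost[4][5] = 2;
    if (astar(0, 5)) {
        printf("Lowest-cost path (reversed):");
        for (v = 5; v != -1; v = parent[v])
            printf(" %d", v);
        printf("\nCost: %d\n", g[5]);
    } else
        printf("No path found.\n");
    return 0;
}

With these assumed edges the program reports the lowest-cost path 0-2-3-4-5 (printed in reverse) with total cost 10.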

Signature of Professor

Sushila Devi Bansal College Of Technology, Indore


CLASS WORK
SESSIONAL WORK

ASSIGNMENT NO.
EXPERIMENT NO.

SUBMITTED ON.. MARKS OR GRADE OBTAINED


NAME..ROLL NO..
CLASS DEPARTMENT.
SUBJECT CODE NO..

Signature of Student

4. WAP to implement the N-Queens problem.


N-Queens Problem
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <conio.h>
void check(int, int, char [100][100]);
void print(char [100][100]);
int no_of_queens, queen = 2, flagrow = 0, flagcol = 0;
int count = 1;
char ch, response_row, response_col;
int main(void)
{
int row, col, i;
char board[100][100], response;
clrscr();
printf("
@@ This is n-queen problem.
Enter the number of
queens(say n)
and watch how computer places them
in (n x n) matrix
such that none can
meet another moving along horizontally,
vertically
or digonally.
");
printf("
Enter the number of queens : ");
scanf("%d", &no_of_queens);
if(no_of_queens > 23)
{
printf("\n@@ Though the program works for any number of queens, due to the\n"
"configuration of the output screen the output will be truncated\n"
"(a very large queen number may also cause a stack overflow), so it is\n"
"highly recommended that you run the program with at most 23 queens...");
printf("\nWant to continue(Y/N)?");
fflush(stdin);
scanf("%c", &response);
if(toupper(response) == 'N')
return (0);
}
else if(no_of_queens < 3)
{
printf("The number of queens must be at least 3.");
getch();
return (0);
}
printf("Want a row number below the board(Y/N) : ");
fflush(stdin);
response_row = (char)getchar();
if(toupper(response_row) == 'Y')
flagrow = 1;
printf("Want a column number below the board(Y/N) : ");
fflush(stdin);
response_col = (char)getchar();
if(toupper(response_col) == 'Y')
flagcol = 1;
clrscr();
printf("M/c in work ! Please Wait...");
// This for-loop is used for checking all the columns of row 0 only...
_setcursortype(_NOCURSOR);
for(col = 0; col < no_of_queens; col++)
{
memset(board, '-', sizeof(board));
check( 0, col, board );
}
clrscr();
printf("Thank you for seeing this program through.");
getch();
return (0);
}
void check( int r, int c, char board[100][100] )
{

int i, j;
// Terminating condition for the recursion...
if ( ( r == no_of_queens ) && ( c == 0 ))
{
clrscr();
printf(" (%d-Queen) Set : %d
", no_of_queens, count++);
print( board );
fflush(stdin);
ch = (char)getch();
clrscr();
if(ch == 'e')
exit (0);
printf("M/c in work ! Please Wait...");
}
// Vertical check...
for(i = 0; i < r; i++)
{
if ( board[i][c] == queen)
return;
}
// Horizontal check...
for(j = 0; j < c; j++)
{
if ( board[r][j] == queen)
return;
}
// Left-Diagonal check...
i = r; j = c;
do
{
if ( board[i][j] == queen )
return;
i--; j--;
}
while( i >= 0 && j >= 0 );
// Right-Diagonal check...
i = r; j = c;
do
{
if ( board[i][j] == queen )
return;
i--; j++;
}
while( i >= 0 && j < no_of_queens );
// Placing the queen if the checked position is OK...
board[r][c] = queen;
r++;
// This for-loop is used for checking all the columns for each row
//starting from 1 upto the end...
for(int p = 0; p < no_of_queens; p++)

check(r, p, board);
for(int h = 0; h < no_of_queens; h++)
board[r - 1][h] = '-';
}
void print(char board[100][100])
{
for(int i = 0; i < no_of_queens; i++)
{
if(flagrow == 1)
printf("%3d", i + 1);
for(int j = 0; j < no_of_queens; j++)
{
if(board[i][j] == queen)
{
textcolor(RED);
cprintf("%3c", queen);
}
else
{
textcolor(8); //dark gray
cprintf("%3c", 22);
}
}
printf("
");
}
textcolor(7);
if(flagcol == 1)
{ if(flagrow)
printf(" ");
for(i = 0; i < no_of_queens; i++)
printf("%3d", i + 1);
}
gotoxy(62, 1);
printf("Press E to exit.");
textcolor(7);
}

Sushila Devi Bansal College Of Technology, Indore


CLASS WORK
SESSIONAL WORK

ASSIGNMENT NO.
EXPERIMENT NO.

SUBMITTED ON.. MARKS OR GRADE OBTAINED


NAME..ROLL NO..
CLASS DEPARTMENT.
SUBJECT CODE NO..

Signature of Student

Signature of Professor

5. Implement an artificial neural network.


Implementation
1. Gather the necessary libraries (or write them)
We need the following libraries:
A library that supports matrix algebra; and
A library that plots graphs (x versus y).
If you can't find a matrix library for your implementation language, then you can
write a simple library yourself. Since neural nets do not require matrix inverses or
long chains of matrix products, you need not worry (much) about numerical stability,
so the implementation is straightforward.
The implementation needs to support the following matrix operations:
matrix transposition;
matrix addition;
matrix multiplication with a scalar;
ordinary matrix multiplication;
Hadamard multiplication (component-wise multiplication);
Kronecker multiplication (only necessary between row and column vectors); and
horizontal matrix concatenation.
The first few operations are standard matrix operations, but you might be less
familiar with the last three.
Hadamard multiplication of matrices is defined for two matrices of equal
dimensions. Each component of the new matrix is the product of corresponding
components in the two multiplicands, that is:
Z[i][j] = X[i][j] * Y[i][j]
The Kronecker product of a row vector and column vector is defined as a matrix
whose components are given by:
Z[i][j] = X[0][i] * Y[j][0]
It is possible to define the product for arbitrary matrices, but we don't need it.
The horizontal concatenation combines two matrices with the same number of rows.

For example, the matrices A and B below are concatenated to form the new matrix C:

A = | 1 2 |    B = | 5 |    C = | 1 2 5 |
    | 3 4 |        | 6 |        | 3 4 6 |
A simple implementation constructs a new matrix whose components are given by:
if j < X_width
    Z[i][j] = X[i][j]
else
    Z[i][j] = Y[i][j - X_width]
If no graph libraries are available, simply write a function that will output a
tab-separated list of the input and output sequences to plot. You can then load or
paste this into your favourite spreadsheet program to make the necessary plots.
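
As a minimal sketch in C (using C99 variable-length array parameters; the function names mirror the operations above, but the row-major layout is an assumption of this sketch), the three less-common operations can be written as:

/* Hadamard product: Z[i][j] = X[i][j] * Y[i][j] (equal dimensions). */
void hadamard(int rows, int cols, double Z[rows][cols],
              double X[rows][cols], double Y[rows][cols])
{
    for (int i = 0; i < rows; i++)
        for (int j = 0; j < cols; j++)
            Z[i][j] = X[i][j] * Y[i][j];
}

/* Kronecker product of a row vector x (length n) and a column vector y
   (length m): Z[i][j] = x[i] * y[j], giving an n x m matrix. */
void kronecker(int n, int m, double Z[n][m], double x[n], double y[m])
{
    for (int i = 0; i < n; i++)
        for (int j = 0; j < m; j++)
            Z[i][j] = x[i] * y[j];
}

/* Horizontal concatenation of X (rows x xc) and Y (rows x yc). */
void horcat(int rows, int xc, int yc, double Z[rows][xc + yc],
            double X[rows][xc], double Y[rows][yc])
{
    for (int i = 0; i < rows; i++)
        for (int j = 0; j < xc + yc; j++)
            Z[i][j] = (j < xc) ? X[i][j] : Y[i][j - xc];
}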
2. Implement Output and Class conversion functions
This is very simple: implement a function that converts an output matrix to a class
number vector, and another that converts a class number to an output vector.
For example, the output_to_class function will take the following matrix

1 0 0
0 1 0
0 0 1
1 0 0
0 0 1

and convert it to:

1
2
3
1
3

(The second function converts the class vector back into the first matrix.)
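
A minimal C sketch of the two conversions, assuming one row per sample, one-hot outputs, and classes numbered from 1 as in the example above:

/* Convert a (samples x classes) one-hot output matrix to class numbers. */
void output_to_class(int samples, int classes,
                     double outputs[samples][classes], int class_no[samples])
{
    for (int s = 0; s < samples; s++) {
        int best = 0;
        for (int c = 1; c < classes; c++)        /* index of largest entry */
            if (outputs[s][c] > outputs[s][best])
                best = c;
        class_no[s] = best + 1;                   /* classes numbered from 1 */
    }
}

/* Convert class numbers back to a one-hot output matrix. */
void class_to_output(int samples, int classes,
                     int class_no[samples], double outputs[samples][classes])
{
    for (int s = 0; s < samples; s++)
        for (int c = 0; c < classes; c++)
            outputs[s][c] = (c == class_no[s] - 1) ? 1.0 : 0.0;
}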
3. Implement a function to read in data files
For this tutorial you can use the following three files:
iris_training.dat
iris_validation.dat
iris_test.dat
These three files contain samples from the UCI iris dataset, a simple and quite
famous dataset. In each file, samples are contained in rows. Each row has seven
entries, separated by tabs. The first four entries are features of irises (sepal length,
sepal width, petal length, and petal width); the last three are outputs denoting the
species of iris (setosa, versicolor, and virginica). I have preprocessed the values a bit
to get them in the appropriate ranges.
You must read in the data so that you can treat the inputs of each set as a single
matrix; similarly for the outputs.

I find it useful to store all the data in a structure, like this:

data_set
  input_count
  output_count
  training_set
    inputs
    outputs
    classes
    count
    bias
  validation_set
    inputs
    outputs
    classes
    count
    bias
  test_set
    inputs
    outputs
    classes
    count
    bias

This makes it more convenient to pass the data around as parameters.
4. Implement an activation function and its derivative
The activation function must take in a matrix X, and return a matrix Y. Y is computed
by applying a function component-wise to X. For now, use the hyperbolic tangent
function:

y = tanh(x)

The activation function derivative must similarly take in a matrix X, and return a
matrix Y. Y is computed by applying the derivative of the activation component-wise
to X. The derivative of the function above is:

dy/dx = 1 - tanh(x)^2 = 1 - y^2
5. Implement the feed-forward function
The function must take as arguments an input matrix, a weight matrix, and a bias node
matrix.
The function should return an output matrix and a net matrix.
These are computed as follows:
net = mul(weights, horcat(inputs, bias))
output = activate(net)
The bias matrix is a constant column vector of 1s with as many rows as the input
matrix; this vector corresponds to the bias nodes. The implementation is a bit
clumsy, but for now this approach minimises the potential for error.
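
A sketch of this step in C: here the samples are rows of the input matrix, the bias is folded into the last column of the weight matrix, and the tanh activation from step 4 is applied component-wise. These layout choices are assumptions of the sketch, not requirements of the tutorial.

#include <math.h>

/* Feed-forward for one layer. inputs is (samples x in_count), weights is
   (out_count x (in_count + 1)) with the bias weights in the last column,
   and net and output are (samples x out_count). */
void feedforward(int samples, int in_count, int out_count,
                 double inputs[samples][in_count],
                 double weights[out_count][in_count + 1],
                 double net[samples][out_count],
                 double output[samples][out_count])
{
    for (int s = 0; s < samples; s++)
        for (int o = 0; o < out_count; o++) {
            double sum = weights[o][in_count];  /* bias node, input fixed at 1 */
            for (int i = 0; i < in_count; i++)
                sum += weights[o][i] * inputs[s][i];
            net[s][o] = sum;
            output[s][o] = tanh(sum);
        }
}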
6. Implement a weight initialisation function
This function must take in a maximum weight, a width and a height, and return a
matrix of the given width and height, randomly initialised in the range
[-max_weight, max_weight].
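
A possible sketch using rand() from <stdlib.h> (seeding the generator is left to the caller):

#include <stdlib.h>

/* Fill a (height x width) matrix with uniform values in [-max_weight, max_weight]. */
void initialise_weights(int height, int width,
                        double W[height][width], double max_weight)
{
    for (int i = 0; i < height; i++)
        for (int j = 0; j < width; j++)
            W[i][j] = ((double)rand() / RAND_MAX * 2.0 - 1.0) * max_weight;
}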

7. Implement a function that evaluates the network error.


The function must take in:
an input matrix,
a weight matrix,
a target output matrix,
a target class matrix,
a bias matrix.
The function must return the error e, and the classification error c.
To compute these, first compute the output matrix Z using the feed-forward function
(you can ignore the net matrix).
[output net] = feedforward(inputs, weights, bias)
Now subtract the target output matrix from the output matrix, square the
components, add them together, and normalise:
error = sum_all_components((target_outputs - outputs)^2) ...
/ (sample_count * output_count)
From the output matrix, calculate the classes:
classes = classes_from_output_vectors(outputs)
Count the number of classes that corresponds with the target classes, and divide by
the number of samples to normalise:
c = sum_all_components(classes != target_classes)/sample_count
(Here, our inequality returns a matrix of 0s and 1s, with 1s in positions where the
corresponding components in classes and target_classes are not equal.)
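
A condensed C sketch of both error measures, reusing the hypothetical feedforward and output_to_class helpers from the earlier sketches (for large data sets the scratch matrices should be heap-allocated rather than placed on the stack):

/* Prototypes of the helper sketches given earlier. */
void feedforward(int samples, int in_count, int out_count,
                 double inputs[samples][in_count],
                 double weights[out_count][in_count + 1],
                 double net[samples][out_count],
                 double output[samples][out_count]);
void output_to_class(int samples, int classes,
                     double outputs[samples][classes], int class_no[samples]);

/* Mean squared error e and classification error c for one weight matrix. */
void network_error(int samples, int in_count, int out_count,
                   double inputs[samples][in_count],
                   double weights[out_count][in_count + 1],
                   double targets[samples][out_count],
                   int target_classes[samples],
                   double *e, double *c)
{
    double net[samples][out_count], outputs[samples][out_count];
    int classes[samples], wrong = 0;
    feedforward(samples, in_count, out_count, inputs, weights, net, outputs);
    *e = 0.0;
    for (int s = 0; s < samples; s++)
        for (int o = 0; o < out_count; o++) {
            double d = targets[s][o] - outputs[s][o];
            *e += d * d;
        }
    *e /= (double)(samples * out_count);
    output_to_class(samples, out_count, outputs, classes);
    for (int s = 0; s < samples; s++)
        if (classes[s] != target_classes[s])
            wrong++;                      /* misclassified sample */
    *c = (double)wrong / samples;
}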
8. Implement a dummy backpropagation function
The function should take in:
An input matrix
A weight matrix
a learning rate (eta, as in the Greek letter)
a bias vector
The function must return an updated weight matrix. For now, return W as is.
9. Implement the train function
The training function should take in three sets, the training_set, validation_set, and
test_set. Implement a way to limit the maximum number of samples that will
actually be used for training (you can also do this in the main program described in
the next section). This is very helpful for debugging purposes (especially if you plan
to later replace the backpropagation algorithm with something a little faster and
more complicated).
The function should return a weight matrix, and error values as floats.
Initialise a value plot_graphs to true. This is a debug flag, so it is appropriate to
implement this as a macro if it is supported by the implementation language.
The function should initialise a weight matrix using initialise weights. For now, use a
max_weight of 1/2.
The function should also construct three bias vectors bias_training, bias_validate,
and bias_test. Each must contain only 1s, with as many rows as there are inputs in

the training, validation and test sets respectively.


Implement a while loop that stops after 500 iterations. (We will change the while
condition later to something else, so do not use a for loop).
Inside the loop, call the backpropagation algorithm. Use the training set inputs, the
weights, (for now) a fixed learning rate of 0.1, and bias vector bias_train. Assign the
result to weights.
Still inside the loop, call the network error function three times: one time for each of
the training, validation, and test sets. Use the weight matrix, and the appropriate bias
vector. Wrap these calls in an if-statement that tests for a value plot_graphs. (If your
language supports it, you can use conditional compilation on the value of
plot_graphs).
Store the errors in six arrays (error_train, classification_error_train, etc.), with the
current epoch number as index.
After the loop, plot the six error arrays as a function of epoch number. Wrap this in
an if-statement (or conditional compilation statement) that tests for the value
plot_graphs.
Call the network error function again, on all three sets as before.
Return the weights, and the six errors.
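
Tying these pieces together, a C skeleton of the train function might look as follows; the helper names are the hypothetical ones from the earlier sketches, the set parameters are elided, and the error calls are left as placeholders in the spirit of the dummy backpropagation of step 8.

#define MAX_EPOCHS 500

void train(void)
{
    int plot_graphs = 1;                         /* debug flag */
    double error_train[MAX_EPOCHS], class_error_train[MAX_EPOCHS];
    double error_val[MAX_EPOCHS],   class_error_val[MAX_EPOCHS];
    double error_test[MAX_EPOCHS],  class_error_test[MAX_EPOCHS];
    int epoch = 0;

    /* initialise the weight matrix with initialise_weights and
       max_weight = 0.5; build the all-ones bias vectors bias_train,
       bias_validate and bias_test here */

    while (epoch < MAX_EPOCHS) {     /* the stop condition will change later */
        /* weights = backpropagation(training inputs, weights, 0.1, bias_train) */
        if (plot_graphs) {
            /* call network_error on the training, validation and test sets;
               placeholders until those calls are wired in: */
            error_train[epoch] = class_error_train[epoch] = 0.0;
            error_val[epoch]   = class_error_val[epoch]   = 0.0;
            error_test[epoch]  = class_error_test[epoch]  = 0.0;
        }
        epoch++;
    }
    /* if (plot_graphs): plot the six arrays against epoch number;
       call network_error once more on all three sets, and return the
       weights and the six errors to the caller */
}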
10. Implement the main training program
The program should load in the sets (using the load_sets function), and pass these to
the training algorithm.
11. Run the program
The important thing is that everything should run. You should see your error plots; at
this stage they should be straight, horizontal lines. Because of the random weight
initialisation, we cannot predict where these lines will lie (so do not be alarmed if
your plots lie elsewhere, as long as the lines are straight and horizontal).

Sushila Devi Bansal College Of Technology, Indore


CLASS WORK
SESSIONAL WORK

ASSIGNMENT NO.
EXPERIMENT NO.

SUBMITTED ON.. MARKS OR GRADE OBTAINED


NAME..ROLL NO..
CLASS DEPARTMENT.
SUBJECT CODE NO..

Signature of Student

6. WAP to implement image processing.


#include <stdio.h>
#include <stdlib.h>
#include <conio.h>
void main( )
{
FILE *fp;
unsigned char i[100][100];
int m,n;
clrscr();
if((fp = fopen("lenna.dat","r+")) == NULL)
{
printf("Just cannot open the specified file.\n");
exit(1);
}
else
{
for(m=0;m<100;m++)
{
for(n=0;n<100;n++)
{
fscanf(fp,"%c",&i[m][n]);
}
}
fclose(fp);
getch();
}
}

Signature of Professor

Sushila Devi Bansal College Of Technology, Indore


CLASS WORK
SESSIONAL WORK

ASSIGNMENT NO.
EXPERIMENT NO.

SUBMITTED ON.. MARKS OR GRADE OBTAINED


NAME..ROLL NO..
CLASS DEPARTMENT.
SUBJECT CODE NO..

Signature of Student

Signature of Professor

7. Write a program to implement an ADALINE network for the purpose of pattern recognition.
/******************************************************************************
 * DECLARATIONS
 ******************************************************************************/
#include <stdlib.h>
#include <stdio.h>
typedef int BOOL;
typedef char CHAR;
typedef int INT;
typedef double REAL;
#define FALSE 0
#define TRUE 1
#define NOT !
#define AND &&
#define OR ||
#define MIN(x,y) ((x)<(y) ? (x) : (y))
#define MAX(x,y) ((x)>(y) ? (x) : (y))
#define LO -1
#define HI +1
#define BIAS 1
#define sqr(x) ((x)*(x))
typedef struct { /* A LAYER OF A NET: */
INT Units; /* - number of units in this layer */
REAL* Activation; /* - activation of ith unit */
INT* Output; /* - output of ith unit */
REAL* Error; /* - error term of ith unit */
REAL** Weight; /* - connection weights to ith unit */
} LAYER;
typedef struct { /* A NET: */
LAYER* InputLayer; /* - input layer */
LAYER* OutputLayer; /* - output layer */
REAL Eta; /* - learning rate */

REAL Error; /* - total net error */


REAL Epsilon; /* - net error to terminate training */
} NET;
/******************************************************************************
 * RANDOMS DRAWN FROM DISTRIBUTIONS
 ******************************************************************************/
void InitializeRandoms()
{
srand(4711);
}
INT RandomEqualINT(INT Low, INT High)
{
return rand() % (High-Low+1) + Low;
}
REAL RandomEqualREAL(REAL Low, REAL High)
{
return ((REAL) rand() / RAND_MAX) * (High-Low) + Low;
}
/******************************************************************************
 * APPLICATION-SPECIFIC CODE
 ******************************************************************************/
#define NUM_DATA 10
#define X 5
#define Y 7
#define N (X * Y)
#define M 10
CHAR Pattern[NUM_DATA][Y][X] = { { " OOO ",
"O O",
"O O",
"O O",
"O O",
"O O",
" OOO " },
{ " O ",
" OO ",
"O O ",
" O ",
" O ",
" O ",
" O " },
{ " OOO ",
"O O",
" O",
" O ",
" O ",
" O ",
"OOOOO" },

{ " OOO ",


"O O",
" O",
" OOO ",
" O",
"O O",
" OOO " },
{ " O ",
" OO ",
" O O ",
"O O ",
"OOOOO",
" O ",
" O " },
{ "OOOOO",
"O ",
"O ",
"OOOO ",
" O",
"O O",
" OOO " },
{ " OOO ",
"O O",
"O ",
"OOOO ",
"O O",
"O O",
" OOO " },
{ "OOOOO",
" O",
" O",
" O ",
" O ",
" O ",
"O " },
{ " OOO ",
"O O",
"O O",
" OOO ",
"O O",
"O O",
" OOO " },
{ " OOO ",
"O O",
"O O",
" OOOO",
" O",
"O O",
" OOO " } };
INT Input [NUM_DATA][N];
INT Output[NUM_DATA][M] =

{ {HI, LO, LO, LO, LO, LO, LO, LO, LO, LO},
{LO, HI, LO, LO, LO, LO, LO, LO, LO, LO},
{LO, LO, HI, LO, LO, LO, LO, LO, LO, LO},
{LO, LO, LO, HI, LO, LO, LO, LO, LO, LO},
{LO, LO, LO, LO, HI, LO, LO, LO, LO, LO},
{LO, LO, LO, LO, LO, HI, LO, LO, LO, LO},
{LO, LO, LO, LO, LO, LO, HI, LO, LO, LO},
{LO, LO, LO, LO, LO, LO, LO, HI, LO, LO},
{LO, LO, LO, LO, LO, LO, LO, LO, HI, LO},
{LO, LO, LO, LO, LO, LO, LO, LO, LO, HI} };
FILE* f;
void InitializeApplication(NET* Net)
{
INT n,i,j;
Net->Eta = 0.001;
Net->Epsilon = 0.0001;
for (n=0; n<NUM_DATA; n++) {
for (i=0; i<Y; i++) {
for (j=0; j<X; j++) {
Input[n][i*X+j] = (Pattern[n][i][j] == 'O') ? HI : LO;
}
}
}
f = fopen("ADALINE.txt", "w");
}
void WriteInput(NET* Net, INT* Input)
{
INT i;
for (i=0; i<N; i++) {
if (i%X == 0) {
fprintf(f, "\n");
}
fprintf(f, "%c", (Input[i] == HI) ? 'O' : ' ');
}
fprintf(f, " -> ");
}
void WriteOutput(NET* Net, INT* Output)
{
INT i;
INT Count, Index;
Count = 0;
for (i=0; i<M; i++) {
if (Output[i] == HI) {
Count++;
Index = i;
}
}
if (Count == 1)
fprintf(f, "%i\n", Index);
else
fprintf(f, "%s\n", "invalid");

}
void FinalizeApplication(NET* Net)
{
fclose(f);
}
/******************************************************************************
 * INITIALIZATION
 ******************************************************************************/
void GenerateNetwork(NET* Net)
{
INT i;
Net->InputLayer = (LAYER*) malloc(sizeof(LAYER));
Net->OutputLayer = (LAYER*) malloc(sizeof(LAYER));
Net->InputLayer->Units = N;
Net->InputLayer->Output = (INT*) calloc(N+1, sizeof(INT));
Net->InputLayer->Output[0] = BIAS;
Net->OutputLayer->Units = M;
Net->OutputLayer->Activation = (REAL*) calloc(M+1, sizeof(REAL));
Net->OutputLayer->Output = (INT*) calloc(M+1, sizeof(INT));
Net->OutputLayer->Error = (REAL*) calloc(M+1, sizeof(REAL));
Net->OutputLayer->Weight = (REAL**) calloc(M+1, sizeof(REAL*));
for (i=1; i<=M; i++) {
Net->OutputLayer->Weight[i] = (REAL*) calloc(N+1, sizeof(REAL));
}
Net->Eta = 0.1;
Net->Epsilon = 0.01;
}
void RandomWeights(NET* Net)
{
INT i,j;
for (i=1; i<=Net->OutputLayer->Units; i++) {
for (j=0; j<=Net->InputLayer->Units; j++) {
Net->OutputLayer->Weight[i][j] = RandomEqualREAL(-0.5, 0.5);
}
}
}
void SetInput(NET* Net, INT* Input, BOOL Protocoling)
{
INT i;
for (i=1; i<=Net->InputLayer->Units; i++) {
Net->InputLayer->Output[i] = Input[i-1];
}
if (Protocoling) {
WriteInput(Net, Input);
}
}
void GetOutput(NET* Net, INT* Output, BOOL Protocoling)
{
INT i;

for (i=1; i<=Net->OutputLayer->Units; i++) {


Output[i-1] = Net->OutputLayer->Output[i];
}
if (Protocoling) {
WriteOutput(Net, Output);
}
}
/******************************************************************************
 * PROPAGATING SIGNALS
 ******************************************************************************/
void PropagateNet(NET* Net)
{
INT i,j;
REAL Sum;
for (i=1; i<=Net->OutputLayer->Units; i++) {
Sum = 0;
for (j=0; j<=Net->InputLayer->Units; j++) {
Sum += Net->OutputLayer->Weight[i][j] * Net->InputLayer->Output[j];
}
Net->OutputLayer->Activation[i] = Sum;
if (Sum >= 0)
Net->OutputLayer->Output[i] = HI;
else
Net->OutputLayer->Output[i] = LO;
}
}
/******************************************************************************
 * ADJUSTING WEIGHTS
 ******************************************************************************/
void ComputeOutputError(NET* Net, INT* Target)
{
INT i;
REAL Err;
Net->Error = 0;
for (i=1; i<=Net->OutputLayer->Units; i++) {
Err = Target[i-1] - Net->OutputLayer->Activation[i];
Net->OutputLayer->Error[i] = Err;
Net->Error += 0.5 * sqr(Err);
}
}
void AdjustWeights(NET* Net)
{
INT i,j;
INT Out;
REAL Err;
for (i=1; i<=Net->OutputLayer->Units; i++) {
for (j=0; j<=Net->InputLayer->Units; j++) {

Out = Net->InputLayer->Output[j];
Err = Net->OutputLayer->Error[i];
Net->OutputLayer->Weight[i][j] += Net->Eta * Err * Out;
}
}
}
/******************************************************************************
 * SIMULATING THE NET
 ******************************************************************************/
void SimulateNet(NET* Net, INT* Input, INT* Target, BOOL Training, BOOL
Protocoling)
{
INT Output[M];
SetInput(Net, Input, Protocoling);
PropagateNet(Net);
GetOutput(Net, Output, Protocoling);
ComputeOutputError(Net, Target);
if (Training)
AdjustWeights(Net);
}
/******************************************************************************
 * MAIN
 ******************************************************************************/
void main()
{
NET Net;
REAL Error;
BOOL Stop;
INT n,m;
InitializeRandoms();
GenerateNetwork(&Net);
RandomWeights(&Net);
InitializeApplication(&Net);
do {
Error = 0;
Stop = TRUE;
for (n=0; n<NUM_DATA; n++) {
SimulateNet(&Net, Input[n], Output[n], FALSE, FALSE);
Error = MAX(Error, Net.Error);
Stop = Stop AND (Net.Error < Net.Epsilon);
}
Error = MAX(Error, Net.Epsilon);
printf("Training %0.0f%% completed ...\n", (Net.Epsilon / Error) * 100);
if (NOT Stop) {
for (m=0; m<10*NUM_DATA; m++) {
n = RandomEqualINT(0, NUM_DATA-1);
SimulateNet(&Net, Input[n], Output[n], TRUE, FALSE);

}
}
} while (NOT Stop);
for (n=0; n<NUM_DATA; n++) {
SimulateNet(&Net, Input[n], Output[n], FALSE, TRUE);
}
FinalizeApplication(&Net);
}
Simulator Output for the Pattern Recognition Application

Each 5 x 7 input pattern is written to ADALINE.txt followed by the class the
network recognises; the ten digit patterns are mapped to the outputs 0 through 9
(the first pattern -> 0, the second -> 1, and so on).


Sushila Devi Bansal College Of Technology, Indore


CLASS WORK
SESSIONAL WORK

ASSIGNMENT NO.
EXPERIMENT NO.

SUBMITTED ON.. MARKS OR GRADE OBTAINED


NAME..ROLL NO..
CLASS DEPARTMENT.
SUBJECT CODE NO..

Signature of Student

Signature of Professor

8. WAP to implement a backpropagation network.


/******************************************************************************
 * Network:     Backpropagation Network with Bias Terms and Momentum
 * Application: Time-Series Forecasting
 *              Prediction of the Annual Number of Sunspots
 ******************************************************************************/
/******************************************************************************
 * DECLARATIONS
 ******************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
typedef int BOOL;
typedef int INT;
typedef double REAL;
#define FALSE 0
#define TRUE 1
#define NOT !
#define AND &&
#define OR ||
#define MIN_REAL -HUGE_VAL
#define MAX_REAL +HUGE_VAL
#define MIN(x,y) ((x)<(y) ? (x) : (y))
#define MAX(x,y) ((x)>(y) ? (x) : (y))
#define LO 0.1
#define HI 0.9
#define BIAS 1
#define sqr(x) ((x)*(x))

typedef struct { /* A LAYER OF A NET: */


INT Units; /* - number of units in this layer */
REAL* Output; /* - output of ith unit */
REAL* Error; /* - error term of ith unit */
REAL** Weight; /* - connection weights to ith unit */
REAL** WeightSave; /* - saved weights for stopped training */
REAL** dWeight; /* - last weight deltas for momentum */
} LAYER;
typedef struct { /* A NET: */
LAYER** Layer; /* - layers of this net */
LAYER* InputLayer; /* - input layer */
LAYER* OutputLayer; /* - output layer */
REAL Alpha; /* - momentum factor */
REAL Eta; /* - learning rate */
REAL Gain; /* - gain of sigmoid function */
REAL Error; /* - total net error */
} NET;
/******************************************************************************
 * RANDOMS DRAWN FROM DISTRIBUTIONS
 ******************************************************************************/
void InitializeRandoms()
{
srand(4711);
}
INT RandomEqualINT(INT Low, INT High)
{
return rand() % (High-Low+1) + Low;
}
REAL RandomEqualREAL(REAL Low, REAL High)
{
return ((REAL) rand() / RAND_MAX) * (High-Low) + Low;
}
/******************************************************************************
 * APPLICATION-SPECIFIC CODE
 ******************************************************************************/
#define NUM_LAYERS 3
#define N 30
#define M 1
INT Units[NUM_LAYERS] = {N, 10, M};
#define FIRST_YEAR 1700
#define NUM_YEARS 280
#define TRAIN_LWB (N)
#define TRAIN_UPB (179)
#define TRAIN_YEARS (TRAIN_UPB - TRAIN_LWB + 1)
#define TEST_LWB (180)
#define TEST_UPB (259)
#define TEST_YEARS (TEST_UPB - TEST_LWB + 1)

#define EVAL_LWB (260)


#define EVAL_UPB (NUM_YEARS - 1)
#define EVAL_YEARS (EVAL_UPB - EVAL_LWB + 1)
REAL Sunspots_[NUM_YEARS];
REAL Sunspots [NUM_YEARS] = {
0.0262, 0.0575, 0.0837, 0.1203, 0.1883, 0.3033,
0.1517, 0.1046, 0.0523, 0.0418, 0.0157, 0.0000,
0.0000, 0.0105, 0.0575, 0.1412, 0.2458, 0.3295,
0.3138, 0.2040, 0.1464, 0.1360, 0.1151, 0.0575,
0.1098, 0.2092, 0.4079, 0.6381, 0.5387, 0.3818,
0.2458, 0.1831, 0.0575, 0.0262, 0.0837, 0.1778,
0.3661, 0.4236, 0.5805, 0.5282, 0.3818, 0.2092,
0.1046, 0.0837, 0.0262, 0.0575, 0.1151, 0.2092,
0.3138, 0.4231, 0.4362, 0.2495, 0.2500, 0.1606,
0.0638, 0.0502, 0.0534, 0.1700, 0.2489, 0.2824,
0.3290, 0.4493, 0.3201, 0.2359, 0.1904, 0.1093,
0.0596, 0.1977, 0.3651, 0.5549, 0.5272, 0.4268,
0.3478, 0.1820, 0.1600, 0.0366, 0.1036, 0.4838,
0.8075, 0.6585, 0.4435, 0.3562, 0.2014, 0.1192,
0.0534, 0.1260, 0.4336, 0.6904, 0.6846, 0.6177,
0.4702, 0.3483, 0.3138, 0.2453, 0.2144, 0.1114,
0.0837, 0.0335, 0.0214, 0.0356, 0.0758, 0.1778,
0.2354, 0.2254, 0.2484, 0.2207, 0.1470, 0.0528,
0.0424, 0.0131, 0.0000, 0.0073, 0.0262, 0.0638,
0.0727, 0.1851, 0.2395, 0.2150, 0.1574, 0.1250,
0.0816, 0.0345, 0.0209, 0.0094, 0.0445, 0.0868,
0.1898, 0.2594, 0.3358, 0.3504, 0.3708, 0.2500,
0.1438, 0.0445, 0.0690, 0.2976, 0.6354, 0.7233,
0.5397, 0.4482, 0.3379, 0.1919, 0.1266, 0.0560,
0.0785, 0.2097, 0.3216, 0.5152, 0.6522, 0.5036,
0.3483, 0.3373, 0.2829, 0.2040, 0.1077, 0.0350,
0.0225, 0.1187, 0.2866, 0.4906, 0.5010, 0.4038,
0.3091, 0.2301, 0.2458, 0.1595, 0.0853, 0.0382,
0.1966, 0.3870, 0.7270, 0.5816, 0.5314, 0.3462,
0.2338, 0.0889, 0.0591, 0.0649, 0.0178, 0.0314,
0.1689, 0.2840, 0.3122, 0.3332, 0.3321, 0.2730,
0.1328, 0.0685, 0.0356, 0.0330, 0.0371, 0.1862,
0.3818, 0.4451, 0.4079, 0.3347, 0.2186, 0.1370,
0.1396, 0.0633, 0.0497, 0.0141, 0.0262, 0.1276,
0.2197, 0.3321, 0.2814, 0.3243, 0.2537, 0.2296,
0.0973, 0.0298, 0.0188, 0.0073, 0.0502, 0.2479,
0.2986, 0.5434, 0.4215, 0.3326, 0.1966, 0.1365,
0.0743, 0.0303, 0.0873, 0.2317, 0.3342, 0.3609,
0.4069, 0.3394, 0.1867, 0.1109, 0.0581, 0.0298,
0.0455, 0.1888, 0.4168, 0.5983, 0.5732, 0.4644,
0.3546, 0.2484, 0.1600, 0.0853, 0.0502, 0.1736,
0.4843, 0.7929, 0.7128, 0.7045, 0.4388, 0.3630,
0.1647, 0.0727, 0.0230, 0.1987, 0.7411, 0.9947,
0.9665, 0.8316, 0.5873, 0.2819, 0.1961, 0.1459,
0.0534, 0.0790, 0.2458, 0.4906, 0.5539, 0.5518,
0.5465, 0.3483, 0.3603, 0.1987, 0.1804, 0.0811,

0.0659, 0.1428, 0.4838, 0.8127


};
REAL Mean;
REAL TrainError;
REAL TrainErrorPredictingMean;
REAL TestError;
REAL TestErrorPredictingMean;
FILE* f;
void NormalizeSunspots()
{
INT Year;
REAL Min, Max;
Min = MAX_REAL;
Max = MIN_REAL;
for (Year=0; Year<NUM_YEARS; Year++) {
Min = MIN(Min, Sunspots[Year]);
Max = MAX(Max, Sunspots[Year]);
}
Mean = 0;
for (Year=0; Year<NUM_YEARS; Year++) {
Sunspots_[Year] =
Sunspots [Year] = ((Sunspots[Year]-Min) / (Max-Min)) * (HI-LO) + LO;
Mean += Sunspots[Year] / NUM_YEARS;
}
}
void InitializeApplication(NET* Net)
{
INT Year, i;
REAL Out, Err;
Net->Alpha = 0.5;
Net->Eta = 0.05;
Net->Gain = 1;
NormalizeSunspots();
TrainErrorPredictingMean = 0;
for (Year=TRAIN_LWB; Year<=TRAIN_UPB; Year++) {
for (i=0; i<M; i++) {
Out = Sunspots[Year+i];
Err = Mean - Out;
TrainErrorPredictingMean += 0.5 * sqr(Err);
}
}
TestErrorPredictingMean = 0;
for (Year=TEST_LWB; Year<=TEST_UPB; Year++) {
for (i=0; i<M; i++) {
Out = Sunspots[Year+i];
Err = Mean - Out;
TestErrorPredictingMean += 0.5 * sqr(Err);
}
}
f = fopen("BPN.txt", "w");
}

void FinalizeApplication(NET* Net)


{
fclose(f);
}
/******************************************************************************
 * INITIALIZATION
 ******************************************************************************/
void GenerateNetwork(NET* Net)
{
INT l,i;
Net->Layer = (LAYER**) calloc(NUM_LAYERS, sizeof(LAYER*));
for (l=0; l<NUM_LAYERS; l++) {
Net->Layer[l] = (LAYER*) malloc(sizeof(LAYER));
Net->Layer[l]->Units = Units[l];
Net->Layer[l]->Output = (REAL*) calloc(Units[l]+1, sizeof(REAL));
Net->Layer[l]->Error = (REAL*) calloc(Units[l]+1, sizeof(REAL));
Net->Layer[l]->Weight = (REAL**) calloc(Units[l]+1, sizeof(REAL*));
Net->Layer[l]->WeightSave = (REAL**) calloc(Units[l]+1, sizeof(REAL*));
Net->Layer[l]->dWeight = (REAL**) calloc(Units[l]+1, sizeof(REAL*));
Net->Layer[l]->Output[0] = BIAS;
if (l != 0) {
for (i=1; i<=Units[l]; i++) {
Net->Layer[l]->Weight[i] = (REAL*) calloc(Units[l-1]+1,
sizeof(REAL));
Net->Layer[l]->WeightSave[i] = (REAL*) calloc(Units[l-1]+1,
sizeof(REAL));
Net->Layer[l]->dWeight[i] = (REAL*) calloc(Units[l-1]+1,
sizeof(REAL));
}
}
}
Net->InputLayer = Net->Layer[0];
Net->OutputLayer = Net->Layer[NUM_LAYERS - 1];
Net->Alpha = 0.9;
Net->Eta = 0.25;
Net->Gain = 1;
}
void RandomWeights(NET* Net)
{
INT l,i,j;
for (l=1; l<NUM_LAYERS; l++) {
for (i=1; i<=Net->Layer[l]->Units; i++) {
for (j=0; j<=Net->Layer[l-1]->Units; j++) {
Net->Layer[l]->Weight[i][j] = RandomEqualREAL(-0.5, 0.5);
}
}
}
}
void SetInput(NET* Net, REAL* Input)

{
INT i;
for (i=1; i<=Net->InputLayer->Units; i++) {
Net->InputLayer->Output[i] = Input[i-1];
}
}
void GetOutput(NET* Net, REAL* Output)
{
INT i;
for (i=1; i<=Net->OutputLayer->Units; i++) {
Output[i-1] = Net->OutputLayer->Output[i];
}
}
/******************************************************************************
 * SUPPORT FOR STOPPED TRAINING
 ******************************************************************************/
void SaveWeights(NET* Net)
{
INT l,i,j;
for (l=1; l<NUM_LAYERS; l++) {
for (i=1; i<=Net->Layer[l]->Units; i++) {
for (j=0; j<=Net->Layer[l-1]->Units; j++) {
Net->Layer[l]->WeightSave[i][j] = Net->Layer[l]->Weight[i][j];
}
}
}
}
void RestoreWeights(NET* Net)
{
INT l,i,j;
for (l=1; l<NUM_LAYERS; l++) {
for (i=1; i<=Net->Layer[l]->Units; i++) {
for (j=0; j<=Net->Layer[l-1]->Units; j++) {
Net->Layer[l]->Weight[i][j] = Net->Layer[l]->WeightSave[i][j];
}
}
}
}
/******************************************************************************
 * PROPAGATING SIGNALS
 ******************************************************************************/
void PropagateLayer(NET* Net, LAYER* Lower, LAYER* Upper)
{
INT i,j;
REAL Sum;
for (i=1; i<=Upper->Units; i++) {
Sum = 0;

for (j=0; j<=Lower->Units; j++) {


Sum += Upper->Weight[i][j] * Lower->Output[j];
}
Upper->Output[i] = 1 / (1 + exp(-Net->Gain * Sum));
}
}
void PropagateNet(NET* Net)
{
INT l;
for (l=0; l<NUM_LAYERS-1; l++) {
PropagateLayer(Net, Net->Layer[l], Net->Layer[l+1]);
}
}
/******************************************************************************
 * BACKPROPAGATING ERRORS
 ******************************************************************************/
void ComputeOutputError(NET* Net, REAL* Target)
{
INT i;
REAL Out, Err;
Net->Error = 0;
for (i=1; i<=Net->OutputLayer->Units; i++) {
Out = Net->OutputLayer->Output[i];
Err = Target[i-1]-Out;
Net->OutputLayer->Error[i] = Net->Gain * Out * (1-Out) * Err;
Net->Error += 0.5 * sqr(Err);
}
}
void BackpropagateLayer(NET* Net, LAYER* Upper, LAYER* Lower)
{
INT i,j;
REAL Out, Err;
for (i=1; i<=Lower->Units; i++) {
Out = Lower->Output[i];
Err = 0;
for (j=1; j<=Upper->Units; j++) {
Err += Upper->Weight[j][i] * Upper->Error[j];
}
Lower->Error[i] = Net->Gain * Out * (1-Out) * Err;
}
}
void BackpropagateNet(NET* Net)
{
INT l;
for (l=NUM_LAYERS-1; l>1; l--) {
BackpropagateLayer(Net, Net->Layer[l], Net->Layer[l-1]);
}
}
void AdjustWeights(NET* Net)

{
INT l,i,j;
REAL Out, Err, dWeight;
for (l=1; l<NUM_LAYERS; l++) {
for (i=1; i<=Net->Layer[l]->Units; i++) {
for (j=0; j<=Net->Layer[l-1]->Units; j++) {
Out = Net->Layer[l-1]->Output[j];
Err = Net->Layer[l]->Error[i];
dWeight = Net->Layer[l]->dWeight[i][j];
Net->Layer[l]->Weight[i][j] += Net->Eta * Err * Out + Net->Alpha *
dWeight;
Net->Layer[l]->dWeight[i][j] = Net->Eta * Err * Out;
}
}
}
}
/******************************************************************************
 * SIMULATING THE NET
 ******************************************************************************/
void SimulateNet(NET* Net, REAL* Input, REAL* Output, REAL* Target, BOOL
Training)
{
SetInput(Net, Input);
PropagateNet(Net);
GetOutput(Net, Output);
ComputeOutputError(Net, Target);
if (Training) {
BackpropagateNet(Net);
AdjustWeights(Net);
}
}
void TrainNet(NET* Net, INT Epochs)
{
INT Year, n;
REAL Output[M];
for (n=0; n<Epochs*TRAIN_YEARS; n++) {
Year = RandomEqualINT(TRAIN_LWB, TRAIN_UPB);
SimulateNet(Net, &(Sunspots[Year-N]), Output, &(Sunspots[Year]), TRUE);
}
}
void TestNet(NET* Net)
{
INT Year;
REAL Output[M];
TrainError = 0;
for (Year=TRAIN_LWB; Year<=TRAIN_UPB; Year++) {
SimulateNet(Net, &(Sunspots[Year-N]), Output, &(Sunspots[Year]), FALSE);
TrainError += Net->Error;
}

TestError = 0;
for (Year=TEST_LWB; Year<=TEST_UPB; Year++) {
SimulateNet(Net, &(Sunspots[Year-N]), Output, &(Sunspots[Year]), FALSE);
TestError += Net->Error;
}
fprintf(f, "\nNMSE is %0.3f on Training Set and %0.3f on Test Set",
TrainError / TrainErrorPredictingMean,
TestError / TestErrorPredictingMean);
}
void EvaluateNet(NET* Net)
{
INT Year;
REAL Output [M];
REAL Output_[M];
fprintf(f, "\n\n\n");
fprintf(f, "Year Sunspots Open-Loop Prediction Closed-Loop
Prediction\n");
fprintf(f, "\n");
for (Year=EVAL_LWB; Year<=EVAL_UPB; Year++) {
SimulateNet(Net, &(Sunspots [Year-N]), Output, &(Sunspots [Year]), FALSE);
SimulateNet(Net, &(Sunspots_[Year-N]), Output_, &(Sunspots_[Year]), FALSE);
Sunspots_[Year] = Output_[0];
fprintf(f, "%d %0.3f %0.3f
%0.3f\n",
FIRST_YEAR + Year,
Sunspots[Year],
Output [0],
Output_[0]);
}
}
/******************************************************************************
 * MAIN
 ******************************************************************************/
void main()
{
NET Net;
BOOL Stop;
REAL MinTestError;
InitializeRandoms();
GenerateNetwork(&Net);
RandomWeights(&Net);
InitializeApplication(&Net);
Stop = FALSE;
MinTestError = MAX_REAL;
do {
TrainNet(&Net, 10);
TestNet(&Net);
if (TestError < MinTestError) {
fprintf(f, " - saving Weights ...");

MinTestError = TestError;
SaveWeights(&Net);
}
else if (TestError > 1.2 * MinTestError) {
fprintf(f, " - stopping Training and restoring Weights ...");
Stop = TRUE;
RestoreWeights(&Net);
}
} while (NOT Stop);
TestNet(&Net);
EvaluateNet(&Net);
FinalizeApplication(&Net);
}
Simulator Output for the Time-Series Forecasting Application
NMSE is 0.879 on Training Set and 0.834 on Test Set - saving Weights ...
NMSE is 0.818 on Training Set and 0.783 on Test Set - saving Weights ...
NMSE is 0.749 on Training Set and 0.693 on Test Set - saving Weights ...
NMSE is 0.691 on Training Set and 0.614 on Test Set - saving Weights ...
NMSE is 0.622 on Training Set and 0.555 on Test Set - saving Weights ...
NMSE is 0.569 on Training Set and 0.491 on Test Set - saving Weights ...
NMSE is 0.533 on Training Set and 0.467 on Test Set - saving Weights ...
NMSE is 0.490 on Training Set and 0.416 on Test Set - saving Weights ...
NMSE is 0.470 on Training Set and 0.401 on Test Set - saving Weights ...
NMSE is 0.441 on Training Set and 0.361 on Test Set - saving Weights ...
.
.
.
NMSE is 0.142 on Training Set and 0.143 on Test Set
NMSE is 0.142 on Training Set and 0.146 on Test Set
NMSE is 0.141 on Training Set and 0.143 on Test Set
NMSE is 0.146 on Training Set and 0.141 on Test Set
NMSE is 0.144 on Training Set and 0.141 on Test Set
NMSE is 0.140 on Training Set and 0.142 on Test Set
NMSE is 0.144 on Training Set and 0.148 on Test Set
NMSE is 0.140 on Training Set and 0.139 on Test Set - saving Weights ...
NMSE is 0.140 on Training Set and 0.140 on Test Set
NMSE is 0.141 on Training Set and 0.138 on Test Set - saving Weights ...
.
.
.
NMSE is 0.104 on Training Set and 0.154 on Test Set
NMSE is 0.102 on Training Set and 0.160 on Test Set
NMSE is 0.102 on Training Set and 0.160 on Test Set
NMSE is 0.100 on Training Set and 0.157 on Test Set
NMSE is 0.105 on Training Set and 0.153 on Test Set
NMSE is 0.100 on Training Set and 0.155 on Test Set
NMSE is 0.101 on Training Set and 0.154 on Test Set
NMSE is 0.100 on Training Set and 0.158 on Test Set
NMSE is 0.107 on Training Set and 0.170 on Test Set - stopping Training
and restoring Weights ...
NMSE is 0.141 on Training Set and 0.138 on Test Set

Year Sunspots Open-Loop Prediction Closed-Loop Prediction


1960 0.572 0.532 0.532
1961 0.327 0.334 0.301
1962 0.258 0.158 0.146
1963 0.217 0.156 0.098
1964 0.143 0.236 0.149
1965 0.164 0.230 0.273
1966 0.298 0.263 0.405
1967 0.495 0.454 0.552
1968 0.545 0.615 0.627
1969 0.544 0.550 0.589
1970 0.540 0.474 0.464
1971 0.380 0.455 0.305
1972 0.390 0.270 0.191
1973 0.260 0.275 0.139
1974 0.245 0.211 0.158
1975 0.165 0.181 0.170
1976 0.153 0.128 0.175
1977 0.215 0.151 0.193
1978 0.489 0.316 0.274
1979 0.754

Sushila Devi Bansal College Of Technology, Indore


CLASS WORK
SESSIONAL WORK

ASSIGNMENT NO.
EXPERIMENT NO.

SUBMITTED ON.. MARKS OR GRADE OBTAINED


NAME..ROLL NO..
CLASS DEPARTMENT.
SUBJECT CODE NO..

Signature of Student

Signature of Professor

9. WAP to implement Hopfield network.


/******************************************************************************
 * Network:     Hopfield Model
 * Application: Autoassociative Memory
 *              Associative Recall of Images
 ******************************************************************************/
/******************************************************************************
 * DECLARATIONS
 ******************************************************************************/
#include <stdlib.h>
#include <stdio.h>
typedef int BOOL;
typedef char CHAR;
typedef int INT;
#define FALSE 0
#define TRUE 1
#define NOT !
#define AND &&
#define OR ||
#define LO -1
#define HI +1
#define BINARY(x) ((x)==LO ? FALSE : TRUE)
#define BIPOLAR(x) ((x)==FALSE ? LO : HI)
typedef struct { /* A NET: */
INT Units; /* - number of units in this net */
INT* Output; /* - output of ith unit */
INT* Threshold; /* - threshold of ith unit */
INT** Weight; /* - connection weights to ith unit */
} NET;

/******************************************************************************
 * RANDOMS DRAWN FROM DISTRIBUTIONS
 ******************************************************************************/
void InitializeRandoms()
{
srand(4711);
}
INT RandomEqualINT(INT Low, INT High)
{
return rand() % (High-Low+1) + Low;
}
/******************************************************************************
 * APPLICATION-SPECIFIC CODE
 ******************************************************************************/
#define NUM_DATA 5
#define X 10
#define Y 10
#define N (X * Y)
CHAR Pattern[NUM_DATA][Y][X] = { { "O O O O O ",
" O O O O O",
"O O O O O ",
" O O O O O",
"O O O O O ",
" O O O O O",
"O O O O O ",
" O O O O O",
"O O O O O ",
" O O O O O" },
{ "OO OO OO",
"OO OO OO",
" OO OO ",
" OO OO ",
"OO OO OO",
"OO OO OO",
" OO OO ",
" OO OO ",
"OO OO OO",
"OO OO OO" },
{ "OOOOO ",
"OOOOO ",
"OOOOO ",
"OOOOO ",
"OOOOO ",
" OOOOO",
" OOOOO",
" OOOOO",
" OOOOO",

" OOOOO" },
{ "O O O O",
" O O O ",
" O O O ",
"O O O O",
" O O O ",
" O O O ",
"O O O O",
" O O O ",
" O O O ",
"O O O O" },
{ "OOOOOOOOOO",
"O O",
"O OOOOOO O",
"O O O O",
"O O OO O O",
"O O OO O O",
"O O O O",
"O OOOOOO O",
"O O",
"OOOOOOOOOO" } };
CHAR Pattern_[NUM_DATA][Y][X] = { { " ",
" ",
" ",
" ",
" ",
" O O O O O",
"O O O O O ",
" O O O O O",
"O O O O O ",
" O O O O O" },
{ "OOO O O",
" O OOO OO",
" O O OO O",
" OOO O ",
"OO O OOO",
" O OOO O",
"O OO O O",
" O OOO ",
"OO OOO O ",
" O O OOO" },
{ "OOOOO ",
"O O OOO ",
"O O OOO ",
"O O OOO ",
"OOOOO ",
" OOOOO",
" OOO O O",
" OOO O O",
" OOO O O",
" OOOOO" },

{ "O OOOO O",


"OO OOOO ",
"OOO OOOO ",
"OOOO OOOO",
" OOOO OOO",
" OOOO OO",
"O OOOO O",
"OO OOOO ",
"OOO OOOO ",
"OOOO OOOO" },
{ "OOOOOOOOOO",
"O O",
"O O",
"O O",
"O OO O",
"O OO O",
"O O",
"O O",
"O O",
"OOOOOOOOOO" } };
INT Input [NUM_DATA][N];
INT Input_[NUM_DATA][N];
FILE* f;
void InitializeApplication(NET* Net)
{
INT n,i,j;
for (n=0; n<NUM_DATA; n++) {
for (i=0; i<Y; i++) {
for (j=0; j<X; j++) {
Input [n][i*X+j] = BIPOLAR(Pattern [n][i][j] == 'O');
Input_[n][i*X+j] = BIPOLAR(Pattern_[n][i][j] == 'O');
}
}
}
f = fopen("HOPFIELD.txt", "w");
}
void WriteNet(NET* Net)
{
INT i,j;
for (i=0; i<Y; i++) {
for (j=0; j<X; j++) {
fprintf(f, "%c", BINARY(Net->Output[i*X+j]) ? 'O' : ' ');
}
fprintf(f, "\n");
}
fprintf(f, "\n");
}
void FinalizeApplication(NET* Net)
{
fclose(f);
}

/******************************************************************************
 * INITIALIZATION
 ******************************************************************************/
void GenerateNetwork(NET* Net)
{
INT i;
Net->Units = N;
Net->Output = (INT*) calloc(N, sizeof(INT));
Net->Threshold = (INT*) calloc(N, sizeof(INT));
Net->Weight = (INT**) calloc(N, sizeof(INT*));
for (i=0; i<N; i++) {
Net->Threshold[i] = 0;
Net->Weight[i] = (INT*) calloc(N, sizeof(INT));
}
}
void CalculateWeights(NET* Net)
{
INT i,j,n;
INT Weight;
for (i=0; i<Net->Units; i++) {
for (j=0; j<Net->Units; j++) {
Weight = 0;
if (i!=j) {
for (n=0; n<NUM_DATA; n++) {
Weight += Input[n][i] * Input[n][j];
}
}
Net->Weight[i][j] = Weight;
}
}
}
void SetInput(NET* Net, INT* Input)
{
INT i;
for (i=0; i<Net->Units; i++) {
Net->Output[i] = Input[i];
}
WriteNet(Net);
}
void GetOutput(NET* Net, INT* Output)
{
INT i;
for (i=0; i<Net->Units; i++) {
Output[i] = Net->Output[i];
}
WriteNet(Net);
}
/******************************************************************************
 * PROPAGATING SIGNALS
 ******************************************************************************/
BOOL PropagateUnit(NET* Net, INT i)
{
INT j;
INT Sum, Out;
BOOL Changed;
Changed = FALSE;
Sum = 0;
for (j=0; j<Net->Units; j++) {
Sum += Net->Weight[i][j] * Net->Output[j];
}
if (Sum != Net->Threshold[i]) {
if (Sum < Net->Threshold[i]) Out = LO;
if (Sum > Net->Threshold[i]) Out = HI;
if (Out != Net->Output[i]) {
Changed = TRUE;
Net->Output[i] = Out;
}
}
return Changed;
}
void PropagateNet(NET* Net)
{
INT Iteration, IterationOfLastChange;
Iteration = 0;
IterationOfLastChange = 0;
do {
Iteration++;
if (PropagateUnit(Net, RandomEqualINT(0, Net->Units-1)))
IterationOfLastChange = Iteration;
} while (Iteration-IterationOfLastChange < 10*Net->Units);
}
/******************************************************************************
 * SIMULATING THE NET
 ******************************************************************************/
void SimulateNet(NET* Net, INT* Input)
{
INT Output[N];
SetInput(Net, Input);
PropagateNet(Net);
GetOutput(Net, Output);
}
/******************************************************************************
 * MAIN
 ******************************************************************************/

void main()
{
NET Net;
INT n;
InitializeRandoms();
GenerateNetwork(&Net);
InitializeApplication(&Net);
CalculateWeights(&Net);
for (n=0; n<NUM_DATA; n++) {
SimulateNet(&Net, Input[n]);
}
for (n=0; n<NUM_DATA; n++) {
SimulateNet(&Net, Input_[n]);
}
FinalizeApplication(&Net);
}
Simulator Output for the Autoassociative Memory Application

Each input grid is written to HOPFIELD.txt followed by the state the net settles
into ("input -> output"). The five stored 10 x 10 patterns are recalled unchanged,
and each of the five corrupted test patterns (Pattern_) is driven back to the
stored pattern it most resembles.

Sushila Devi Bansal College Of Technology, Indore


CLASS WORK
SESSIONAL WORK

ASSIGNMENT NO.
EXPERIMENT NO.

SUBMITTED ON.. MARKS OR GRADE OBTAINED


NAME..ROLL NO..
CLASS DEPARTMENT.
SUBJECT CODE NO..

Signature of Student

Signature of Professor

10. Case study on NETtalk


NETtalk is an artificial neural network. It is the result of research carried out in the
mid-1980s by Terrence Sejnowski and Charles Rosenberg. The intent behind NETtalk
was to construct simplified models that might shed light on the complexity of
learning human-level cognitive tasks, and to implement them as a connectionist
model that could learn to perform a comparable task.
NETtalk is a program that learns to pronounce written English text by being shown
text as input and matching audio for comparison.
Achievements and limitations
It is a particularly fascinating neural network because hearing the audio examples of
the network as it progresses through training seems to chart a progression from a
baby's babbling to what sounds like a young child reading a kindergarten text,
making the occasional mistake but clearly demonstrating that it has learned the
major rules of reading.
To those who do not rigorously study neural networks and their limitations, it would
appear to be artificial intelligence in the truest sense of the word. Some misinformed
authors have claimed in print that NETtalk learned to read at the level of a
4-year-old human, in about 16 hours! Such a claim, while not an outright lie,
is an example of misunderstanding what human brains do when they read, and what
NETtalk is capable of learning. Being able to read and pronounce text is not the same
as actually comprehending what is being read and understanding in terms of actual
imagery and knowledge representation, and this is a key difference between a human
child learning to read and an experimental neural network such as NETtalk. In other
words, being able to pronounce "grandmother" is not the same as knowing who or
what a grandmother is, and how she relates to your immediate family, or what she
looks like. NETtalk does not specifically address human-level knowledge
representation or its complexities.
NETtalk was created to explore the mechanisms of learning to correctly pronounce
English text. The authors note that learning to read involves a complex mechanism
involving many parts of the human brain. NETtalk does not specifically model the
image processing stages and letter recognition of the visual cortex. Rather, it assumes
that the letters have been pre-classified and recognized, and these letter sequences
comprising words are then shown to the neural network during training and during
performance testing. It is NETtalk's task to learn proper associations between the

correct pronunciation and a given sequence of letters, based on the context in which
the letters appear. In other words, NETtalk learns to use the letters around the
currently pronounced phoneme, which provide cues as to its intended phonemic
mapping.
Smartphone App
Features
Free calling to the US and Canada
Connectivity through WiFi / 3G / Edge
Record conversations
Import your contacts from your phone
Receive customized support through 611
Dial 2663 for FREE conference calling bridge
Free 411 Directory Assistance

How it Works
1. Visit m.nettalk.com to create your netTALK Smartphone Account.
2. Download and install the netTALK Smartphone App on your mobile device.
3. Use the username and password you created to login to the netTALK
smartphone App.
4. Call your friends for free and invite them to do the same.
