Program 1:
%Write a program to generate a few activation functions that are used in neural networks.
x=-10:0.1:10;
tmp=exp(-x);
y1=1./(1+tmp);
y2=(1-tmp)./(1+tmp);
y3=x;
subplot(231);plot(x,y1);grid on;
axis([min(x) max(x) -2 2]);
title('logistic function');
xlabel('(a)');
axis('square');
subplot(232);plot(x,y2);grid on;
axis([min(x) max(x) -2 2]);
title('hyperbolic function');
xlabel('(b)');
axis('square');
subplot(233);plot(x,y3);grid on;
axis([min(x) max(x) min(x) max(x)]);
title('Identity function');
xlabel('(c)');
axis('square');
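A quick numerical check (a sketch, not part of the original listing): the bipolar curve y2=(1-exp(-x))./(1+exp(-x)) is the same function as tanh(x/2), so the two should agree to within rounding error.
%Optional check: the bipolar sigmoid equals tanh(x/2)
err=max(abs(y2-tanh(x/2)));
disp(err); %expected to be on the order of eps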
Output:
Program 2:
%Generate the ANDNOT function using a McCulloch-Pitts neural net with a MATLAB program, getting the weights and threshold value
disp('Enter weight:');
w1=input('weight w1');
w2=input('weight w2');
disp('Enter your threshold value:');
theta=input('theta');
y=[0 0 0 0];
x1=[0 0 1 1];
x2=[0 1 0 1];
z=[0 0 1 0];
con=1;
while con
zin=x1*w1+x2*w2;
for i=1:4
if zin(i)>=theta
y(i)=1;
else
y(i)=0;
end
end
disp('Output of net');
disp(y);
if y==z
con=0;
else
disp('Net is not learning. Enter another set of weights and threshold value');
w1=input('weight w1=');
w2=input('weight w2=');
theta=input('theta=');
end
disp('McCulloch-Pitts net for ANDNOT function');
end
Output:
Enter weight:
weight w1 1
weight w2 1
Enter your threshold value:
theta 0.1
Output of net
0 1 1 1
Net is not learning. Enter another set of weights and threshold value
weight w1=1
weight w2=-1
theta=1
McCulloch-Pitts net for ANDNOT function
Output of net
0 0 1 0
McCulloch-Pitts net for ANDNOT function
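Once a working parameter set is known (here w1=1, w2=-1, theta=1 from the run above), the trial-and-error loop can be bypassed. The short check below is a sketch, not part of the original listing:
%Direct evaluation of the McCulloch-Pitts ANDNOT net with w1=1, w2=-1, theta=1
x1=[0 0 1 1];
x2=[0 1 0 1];
zin=x1*1+x2*(-1);
y=double(zin>=1);
disp(y); %expected output: 0 0 1 0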
Program 3:
%Generate the XOR function using McCulloch-Pitts neurons by writing an M-file.
clear;
clc;
disp('Enter weight:');
w11=input('weight w11');
w12=input('weight w12');
w21=input('weight w21');
w22=input('weight w22');
v1=input('weight v1');
v2=input('weight v2');
disp('Enter your threshold value:');
theta=input('theta');
x1=[0 0 1 1];
x2=[0 1 0 1];
z=[0 1 1 0];
con=1;
while con
zin1=x1*w11+x2*w21;
zin2=x1*w12+x2*w22;
for i=1:4
if zin1(i)>=theta
y1(i)=1;
else
y1(i)=0;
end
if zin2(i)>=theta
y2(i)=1;
else
y2(i)=0;
end
end
yin=y1*v1+y2*v2;
for i=1:4
if yin(i)>=theta
y(i)=1;
else
y(i)=0;
end
end
disp('Output of net');
disp(y);
if y==z
con=0;
else
disp('Net is not learning. Enter another set of weights and threshold value');
disp('Enter weight:');
w11=input('weight w11');
w12=input('weight w12');
w21=input('weight w21');
w22=input('weight w22');
v1=input('weight v1');
v2=input('weight v2');
disp('Enter your threshold value:');
theta=input('theta');
end
end
disp('McCulloch-Pitts net for XOR function');
Output:
Enter weight:
weight w11 1
weight w12 -1
weight w21 -1
weight w22 1
weight v1 1
weight v2 1
Enter your threshold value:
theta 1
Output of net
0 1 1 0
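The weights accepted above implement XOR as two McCulloch-Pitts AND-NOT units feeding an OR unit: z1 = x1 AND NOT x2, z2 = x2 AND NOT x1, y = z1 OR z2. A minimal check, assuming the same threshold theta=1 (a sketch, not part of the original listing):
%Direct evaluation of the two-layer McCulloch-Pitts XOR net
x1=[0 0 1 1];
x2=[0 1 0 1];
z1=double(x1*1+x2*(-1)>=1); %z1 = x1 AND NOT x2
z2=double(x1*(-1)+x2*1>=1); %z2 = x2 AND NOT x1
y=double(z1+z2>=1);         %y = z1 OR z2
disp(y); %expected output: 0 1 1 0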
Program 4:
% Write a MATLAB program for a perceptron net for the AND function with bipolar inputs and targets.
clear;
clc;
x=[1 1 -1 -1;1 -1 1 -1];
t=[1 -1 -1 -1];
w=[0 0];
b=0;
alpha=input('enter learning rate');
theta=input('enter threshold value');
con=1;
epoch=0;
while con
con=0;
for i=1:4
yin=b+x(1,i)*w(1)+x(2,i)*w(2);
if yin>theta
y=1;
end
if yin<=theta && yin>=-theta
y=0;
end
if yin<-theta
y=-1;
end
if y~=t(i)
con=1;
for j=1:2
w(j)=w(j)+alpha*t(i)*x(j,i);
end
b=b+alpha*t(i);
end
end
epoch=epoch+1;
end
disp('perceptron for AND function');
disp('final weight matrix');
disp(w);
disp('final bias');
disp(b);
Output:
enter learning rate 1
enter threshold value 0.5
perceptron for AND function
final weight matrix
1 1
final bias
-1
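The learned parameters (w=[1 1], b=-1 with theta=0.5 from the run above) can be checked against the bipolar AND truth table; a minimal sketch, not part of the original listing:
%Verify the trained perceptron on the bipolar AND patterns
x=[1 1 -1 -1;1 -1 1 -1];
t=[1 -1 -1 -1];
w=[1 1]; b=-1; theta=0.5;
yin=b+w*x;
y=(yin>theta)-(yin<-theta); %bipolar activation with dead zone [-theta,theta]
disp([t;y]); %the two rows should match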
Program 5: With a suitable example, demonstrate perceptron learning and its decision region using MATLAB. Give the output in graphical form.
clear
p=5;
N=50;
X=2*rand(p-1,2*N)-1;
nn=round((2*N-1)*rand(N,1))+1;
X(:,nn)=sin(X(:,nn));
X=[X;ones(1,2*N)];
wht=3*rand(1,p)-1; wht=wht/norm(wht);
wht;
D=(wht*X>=0);
Xv=X(:, N+1:2*N);
Dv=D(:,N+1:2*N);
X=X(:,1:N);
D=D(:,1:N);
%[X;D]
pr=[1,3];
Xp=X(pr,:);
wp=wht([pr p]);
c0=find(D==0); c1=find(D==1);
figure(1),clf reset
plot(Xp(1,c0),Xp(2,c0),'o',Xp(1,c1),Xp(2,c1),'X')
axis(axis),hold on
L=[-1 1];
S=-diag([1 1]./wp(1:2))*(wp([2,1])'*L+wp(3));
plot([S(1,:) L],[L S(2,:)]), grid, drawnow
%PART 2:Learning
eta=0.5;%The training gain
wh=2*rand(1,p)-1;
wp=wh([pr p]);
S=-diag([1 1]./wp(1:2))*(wp([2,1])'*L +wp(3));
plot([S(1,:) L],[L S(2,:)]),grid on, drawnow
C=50;
E=[C+1,zeros(1,C)];
WW=zeros(C*N,p);
c=1;
cw=0;
while(E(c)>1)||(c==1)
c=c+1;
plot([S(1,:) L],[L S(2,:)],'w'),drawnow;
for n=1:N
eps=D(n)-((wh*X(:,n))>=0);%eps(n)=d(n)-y(n)
wh=wh+eta*eps*X(:,n)';
cw=cw+1;
WW(cw,:)=wh/norm(wh);
E(c)=E(c)+abs(eps);
end;
wp=wh([pr p]);
S=-diag([1 1]./wp(1:2))*(wp([2 1])'*L+wp(3));
plot([S(1,:) L],[L S(2,:)],'g'),drawnow
end;
WW=WW(1:cw,pr);
E=E(2:c+1);
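The error counts collected in E (one entry per epoch) summarize how quickly the perceptron converges; a minimal sketch for plotting them, assuming the training loop above has finished:
%Plot the number of weight-changing updates per epoch (sketch)
figure(2), plot(E), grid on
title('misclassifications per epoch'), xlabel('epoch')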
Output:
Program 6:
Develop a MATLAB program to perform adaptive prediction with Adaline.
clear;
clc;
f1=2;
ts=1/(40*f1);
N=100;
t1=(0:N)*4*ts;
t2=(0:2*N)*ts+4*(N+1)*ts;
t=[t1 t2];
N=size(t,2);
xt=[sin(2*pi*f1*t1) sin(2*pi*f1*t2)];
plot(t,xt),grid
title('signal to be predicted')
p=4;
X=convmtx(xt,p);
X=X(:,1:N);
d=xt;
y=zeros(size(d));
eps=zeros(size(d));
eta=0.4;
w=rand(1,p);
for n=1:N
y(n)=w*X(:,n);
eps(n)=d(n)-y(n);
w=w+eta*eps(n)*X(:,n)';
end
figure(1);
plot(t,d,'b',t,y,'-r'),grid
title('target and prediction signal'),xlabel('time[sec]')
figure(2);
plot(t,eps),grid
title('prediction error'),xlabel('time[sec]')
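A rough way to confirm that the Adaline has adapted is to compare the mean squared prediction error over the early and late parts of the run; a sketch, not part of the original listing:
%Mean squared prediction error, first half vs second half of the run
mse1=mean(eps(1:floor(N/2)).^2);
mse2=mean(eps(floor(N/2)+1:N).^2);
disp([mse1 mse2]); %the second value is expected to be much smaller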
Output:
Program 7:
Write an M-file for adaptive system identification using an Adaline network.
clc;
f=0.8;
ts=0.005;
N1=800;
N2=400;N=N1+N2;
t1=(0:N1-1)*ts;
t2=(N1:N-1)*ts;
t=[t1 t2];
xt=sin(3*t.*sin(2*pi*f*t));
p=3;
b1=[1 -0.6 0.4];
b2=[0.9 -0.5 0.7];
[d1,stt]=filter(b1,1,xt(1:N1));
d2=filter(b2,1,xt(N1+1:N),stt);
dd=[d1 d2];
X=convmtx(xt,p);
X=X(:,1:N);
d=[b1*X(:,1:N1) b2*X(:,N1+1:N)];
y=zeros(size(d));
eps=zeros(size(d));
eta=0.2;
w=2*(rand(1,p)-0.5);
for n=1:N
y(n)=w*X(:,n);
eps(n)=d(n)-y(n);
w=w+eta*eps(n)*X(:,n)';
if n==N1-1,w1=w;
end
end
figure(1);
subplot(2,1,1)
plot(t,xt),grid
title('Input signal x(t)'),xlabel('time [sec]')
subplot(2,1,2)
plot(t,d,'b',t,y,'-r'),grid
title('target and prediction signal'),xlabel('time[sec]')
figure(2);
plot(t,eps),grid
title(['prediction error for eta=',num2str(eta)]),xlabel('time[sec]')
[b1;w1]
[b2;w]
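The two displays above place the true filter coefficients (b1, b2) over the Adaline weights captured before and after the plant change (w1, w); if the identification has worked, each pair should be close. A small sketch (not part of the original listing) that reports the mismatch as a single number per plant:
%Identification error for the two plants
disp('weight error norms for plant 1 and plant 2:');
disp([norm(b1-w1) norm(b2-w)]);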
Output:
Program 8:
Develop a MATLAB program for adaptive noise cancellation using an Adaline network.
clear;
clc;
f=4e3;
fm=300;
fa=200;
ts=2e-5;
N=400;
t=(0:N-1)*ts;
ut=(1+0.2*sin(2*pi*fa*t)).*sin(2*pi*f*(1+0.2*cos(2*pi*fm*t)).*t);
xt=sawtooth(2*pi*1e3*t,0.7);
b=[1 -0.6 0.3];
vt=filter(b,1,xt);
dt=ut+vt;
figure(1);
subplot(2,1,1);
plot(1e3*t,ut,1e3*t,dt),grid
title('Input signal u(t) and noisy input signal d(t)'),xlabel('time [msec]')
subplot(2,1,2)
plot(1e3*t,xt,1e3*t,vt),grid
title('noise x(t) and coloured noise v(t)'),xlabel('time [msec]')
p=4;
X=convmtx(xt,p);
X=X(:,1:N);
y=zeros(1,N);
eps=zeros(1,N);
eta=0.05;
w=2*(rand(1,p)-0.5);
for c=1:4
for n=1:N
y(n)=w*X(:,n);
eps(n)=dt(n)-y(n);
w=w+eta*eps(n)*X(:,n)';
end
eta=0.8*eta;
end
figure(2);
subplot(2,1,1)
plot(1e3*t,ut,1e3*t,eps),grid
title('Input signal u(t) and estimated signal uh(t)'),xlabel('time [msec]')
subplot(2,1,2)
plot(1e3*t(p:N),ut(p:N)-eps(p:N)),grid
title('estimation error'),xlabel('time [msec]')
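Since eps approximates the clean signal u(t) after cancellation, a rough output signal-to-noise ratio can be computed from the residual ut-eps; a sketch, not part of the original listing:
%Approximate output SNR after noise cancellation
snr_out=10*log10(mean(ut(p:N).^2)/mean((ut(p:N)-eps(p:N)).^2));
disp(['approximate output SNR in dB: ',num2str(snr_out)]);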
Output:
Program 9: Develop a MATLAB program for adaptive noise cancellation using an Adaline network.
clear;
clc;
f=4e3;
fm=300;
fa=200;
ts=2e-5;
N=400;
t=(0:N-1)*ts;
ut=(1+0.2*sin(2*pi*fa*t)).*sin(2*pi*f*(1+0.2*cos(2*pi*fm*t)).*t);
xt=sawtooth(2*pi*1e3*t,0.7);
b=[1 -0.6 -0.3];
vt=filter(b,1,xt);
dt=ut+vt;
figure(1)
subplot(2,1,1)
plot(1e3*t, ut, 1e3*t, dt),grid,title('Input u(t) and noisy input signal d(t)'),xlabel('time -- msec')
subplot(2,1,2)
plot(1e3*t, xt, 1e3*t, vt),grid,title('Noise x(t) and coloured noise v(t)'),xlabel('time --- msec')
p=4;
X=convmtx(xt,p); X=X(:,1:N);
y=zeros(1,N);
eps=zeros(1,N);
eta=0.05;
w=2*(rand(1,p)-0.5);
for c=1:4
for n=1:N
y(n)=w*X(:,n);
eps(n)=dt(n)-y(n);
w=w+eta*eps(n)*X(:,n)';
end
eta=0.8*eta;
end
figure(2)
subplot(2,1,1)
plot(1e3*t,ut,1e3*t,eps),grid,...
title('Input signal u(t) and estimated signal uh(t)'),...
xlabel('time--msec')
subplot(2,1,2)
plot(1e3*t(p:N),ut(p:N)-eps(p:N)),grid,...
title('estimation error'),xlabel('time --[msec]')
Output:
Program 10: Write a MATLAB program to calculate the weights for the following patterns using a hetero-associative neural net, mapping four input vectors to two output vectors.
S1  S2  S3  S4    T1  T2
1   1   0   0     1   0
1   0   1   0     1   0
1   1   1   0     0   1
0   1   1   0     0   1
clc;
clear;
x=[1 1 0 0;1 0 1 0 ;1 1 1 0;0 1 1 0];
t=[1 0;1 0;0 1;0 1];
w=zeros(4,2);
for i=1:4
w=w+x(i,1:4)'*t(i,1:2);
end
disp('weight matrix');
disp(w);
Output:
>> weight matrix
2 1
1 2
1 2
0 0
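Since the Hebbian outer-product sum W = sum_i s_i'*t_i is exactly the matrix product x'*t, the weight matrix can be cross-checked in one line; a sketch, not part of the original listing:
%Cross-check: the outer-product sum equals a single matrix product
w_check=x'*t;
disp(w_check); %expected to equal the weight matrix above: 2 1; 1 2; 1 2; 0 0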