固定檢測器資料與浮動車資料的融合演算法
(Fusion algorithm for fixed-detector data and floating-car data: a GA-optimized wavelet neural network, MATLAB)

%% Phase 1: genetic-algorithm pre-training of the WNN weight vector.
% A GA (GAOT-toolbox style) searches for a good initial weight/threshold
% chromosome before the gradient-descent training phase below.
clc                  % clear the command window
clear                % remove all variables from the workspace
tic, % start timing the GA phase
% First run the genetic algorithm
[P,T,R,S1,S2,S,Q] = nninit_test;   % initialise the network (project helper)
% NOTE(review): P/T appear to be training inputs/targets, R/S1/S2 the
% input/hidden/output layer sizes, S the chromosome length and Q the
% number of training samples -- confirm against nninit_test.
aa=ones(S,1)*[-1 1];   % search bounds [-1, 1] for each of the S genes
popu=30;               % population size
initPpp=initializega(popu,aa,'gabpEval_test');   % initial population, fitness = gabpEval_test
gen=80; % number of generations
% Run the GA: terminate after `gen` generations (maxGenTerm); normalized
% geometric selection, arithmetic crossover, non-uniform mutation.
% The option vectors are positional GAOT arguments -- do not reorder.
[x endPop bPop trace]=ga(aa,'gabpEval_test',[],initPpp,[1e-6 1 1],'maxGenTerm',gen,...
  'normGeomSelect',[0.09],['arithXover'],[2],'nonUnifMutation',[2 gen 3]);
% x holds the best chromosome found. Visualise GA progress over the run:
% `trace` columns are [generation, best fitness, mean fitness].
gaGen  = trace(:,1);
gaBest = trace(:,2);
gaMean = trace(:,3);

subplot(2,1,1)
plot(gaGen, 1./gaMean, 'r-')   % red: reciprocal of mean fitness (error) per generation
hold on
plot(gaGen, 1./gaBest, 'b-')   % blue: reciprocal of best fitness per generation
xlabel('Generation');
ylabel('Sum-Squared Error');

subplot(2,1,2)
plot(gaGen, gaMean, 'r-')      % red: mean fitness of each generation
hold on
plot(gaGen, gaBest, 'b-')      % blue: best fitness of each generation
xlabel('Generation');
ylabel('Fittness');
% Phase 2: gradient training of the WNN.
figure(2)
% Decode the best GA chromosome x into the WNN parameters (project helper):
% W1 = input->hidden weights, W2 = hidden->output weights, and
% shift_b1 / scale_a1 the wavelet translation/dilation parameters.
% NOTE(review): remaining outputs (A1, A2, SE, val) are unused below.
[W1, shift_b1, W2, scale_a1, P, T, A1, A2, SE, val]=gadecod_test(x);

%% Phase 2 initialisation: network dimensions, training hyper-parameters,
%% and per-epoch parameter histories seeded with the GA result.

tic;          % restart the timer for the gradient-training phase
% Network dimensions (from nninit_test)
IN=R;         % number of input neurons
HN=S1;        % number of hidden (wavelet) neurons
ON=S2;        % number of output neurons
N=300;        % number of training epochs (pages 3..N+2 below)
derros=zeros(1,(N+2));      % per-epoch accumulated error measure
studyspace=zeros(1,(N+2));  % per-epoch learning rate
% Initial hyper-parameters
studyspace(1,2)=0.002;      % initial learning rate
amlf=1.001;                 % learning-rate growth factor (error decreased)
betat=0.999;                % learning-rate shrink factor (error grew past kkk)
kkk=1.04;                   % error-growth threshold triggering rate shrink
mc=0.01;                    % momentum coefficient


% Parameter histories: one page per epoch; page 2 is the GA-seeded state,
% page 1 stays zero (used only by the momentum term at the first epoch).
% Fix: preallocate v like w/a/b below -- the original grew v implicitly
% on every epoch, which is inconsistent and slow; zero-fill is identical
% to MATLAB's implicit growth, so behaviour is unchanged.
v=zeros(HN,IN,N+2);
v(:,:,2)=W1;                % input->hidden weights seeded from GA
w=zeros(1,HN,N+2);
%W2(:,:,1)=rand(1,S1);
w(:,:,2)=W2;                % hidden->output weights seeded from GA
a=zeros(1,HN,N+2);
a(:,:,2)=shift_b1';         % NOTE(review): shift_b1 feeds `a` (used as dilation
                            % below) and scale_a1 feeds `b` (used as translation);
                            % the naming looks crossed vs gadecod_test -- confirm.
b=zeros(1,HN,N+2);
b(:,:,2)=scale_a1';
%cshab;
mse=zeros(1,N+2);           % accumulated squared error per epoch
ee=zeros(1,N+2);            % last-sample error per epoch
% Train the network on the data.
% Each epoch n: (1) forward pass + gradient accumulation over all Q samples,
% (2) sign-flip of accumulated gradients, (3) adaptive learning-rate update,
% (4) momentum-based parameter update into page n.
% Pages n-1 / n-2 of v, w, a, b hold the previous / pre-previous epoch state.
for n=3:(N+2)

% Gradient accumulators and update buffers, reset each epoch.
wdel=zeros(1,HN);
vdel=zeros(HN,IN); 
adel=zeros(1,HN);
bdel=zeros(1,HN);
wincrease=zeros(1,HN);
vincrease=zeros(HN,IN);
aincrease=zeros(1,HN);
bincrease=zeros(1,HN);
m=1;
while m<Q+1   % loop over the Q training samples

in=P(:,m);    % current input vector

% Forward pass buffers for this sample.
r=zeros(1,HN);   % net input to each hidden neuron
z=zeros(1,HN);   % wavelet activation of each hidden neuron
dz=zeros(1,HN);  % derivative of the wavelet activation
s=0;             % network output accumulator
for j=1:HN
for i=1:IN
r(1,j)=r(1,j)+v(j,i,n-1)*in(i,1);
end
b1=b(1,j,n-1);
a1=a(1,j,n-1);
% hfun/dhfun: project-defined wavelet function and its derivative,
% applied to (r - shift) / scale -- confirm their exact form.
z(1,j)=hfun(r(1,j),b1,a1);
dz(1,j)=dhfun(r(1,j),b1,a1);
s=s+w(1,j,n-1)*z(1,j); 
end
y(m)=s;                      % network output for sample m
EEE=sqrt(abs(y(m)-T(m)));    % NOTE(review): overwritten each sample; only the
                             % LAST sample's value survives into ee(1,n) below.
e=(T(m)-y(m));               % output error for this sample
% Accumulate gradients (the 1.5 factor is a hand-tuned gradient
% learning-efficiency coefficient that gave better results).
for j=1:HN
wdel(1,j)=wdel(1,j)+e*z(1,j) *1.5;    %%%%  tuned gradient-efficiency coefficient
for i=1:IN
vdel(j,i)=vdel(j,i)+  1.5* e*w(1,j,n-1)*dz(1,j)*in(i,1)/a(1,j,n-1);
end
adel(1,j)=adel(1,j)+e *w(1,j,n-1)*dz(1,j)*((r(1,j)-b(1,j,n-1))/a(1,j,n-1))/a(1,j,n-1); 
bdel(1,j)=bdel(1,j)+e *w(1,j,n-1)*dz(1,j)/a(1,j,n-1);
end

% Cross-entropy-style error term.
% NOTE(review): log(y)/log(1-y) is only real-valued for y in (0,1) --
% outside that range this silently produces complex values; verify the
% targets/outputs are normalised to (0,1).
derros(1,n)=derros(1,n)+abs( T(m)*log(y(m))+(1-T(m) )*log(1-y(m)) ) ;


mse(1,n)=mse(1,n)+(y(m)-T(m)).^2;   % accumulate squared error
MSE(1,n)=sqrt(mse(1,n));            % running RMS-style error for epoch n
m=m+1;
end
%plot(n,EEE); 
ee(1,n)=EEE;   % record the LAST sample's error for this epoch (see note above)
% Negate accumulated gradients so updates below descend the error surface.
for j=1:HN
wdel(1,j)=-wdel(1,j);
for i=1:IN
vdel(j,i)=-vdel(j,i);
end
end
derros(1,n)=-derros(1,n);

% Adaptive learning-rate adjustment:
% grow the rate when the error decreased, shrink it when the error grew
% by more than the kkk threshold; otherwise studyspace(1,n) stays 0.
if derros(n)<derros(n-1)
studyspace(1,n)=amlf*studyspace(1,n-1); 
end
if derros(n)>=kkk*derros(n-1)
studyspace(1,n)=betat*studyspace(1,n-1);
end
% Parameter update: gradient step plus momentum term mc*(previous change),
% written into page n of each history array.
for j=1:HN
wincrease(1,j)=-studyspace(1,n)*wdel(1,j)+mc*(w(1,j,n-1)-w(1,j,n-2));
w(1,j,n)=w(1,j,n-1)+wincrease(1,j);
for i=1:IN
vincrease(j,i)=-studyspace(1,n)*vdel(j,i)+mc*(v(j,i,n-1)-v(j,i,n-2));
v(j,i,n)=v(j,i,n-1)+vincrease(j,i);
end
aincrease(1,j)=-studyspace(1,n)*adel(1,j)+mc*(a(1,j,n-1)-a(1,j,n-2));
a(1,j,n)=a(1,j,n-1)+aincrease(1,j); 
bincrease(1,j)=-studyspace(1,n)*bdel(1,j)+mc*(b(1,j,n-1)-b(1,j,n-2));
b(1,j,n)=b(1,j,n-1)+bincrease(1,j);
end
end
% Plot the per-epoch training error curve, normalised by sample count.
toc
errCurve = MSE(4:N+2) / Q;
plot(errCurve);
title('GA優化後的誤差')

% Compare the target signal against the trained network output.
figure(3)
plot(T,'r')     % red line: target signal
hold on 
plot(y,'g*');   % green stars: WNN output
title('GA優化後Wnn的輸出')