Novel Adaptive Fusion Scheme for Cooperative Spectrum Sensing: MATLAB Help
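This page collects the MATLAB functions used by the fusion scheme (APA, KAPA, KRLS, KF, EKF, kernel helpers, and a Rayleigh fading generator), the demo scripts that exercise them on Rayleigh channels, and a final script that plots the saved result curves.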

function [weightVector,biasTerm,learningCurve] = ...
    Func_APA1(K,trainInput,trainTarget,testInput,testTarget,stepSizeWeightVector,stepSizeBias,flagLearningCurve)
% Affine projection algorithm (APA-1) with a bias term; K is the projection order.
[inputDimension,trainSize] = size(trainInput);

if flagLearningCurve
    learningCurve = zeros(trainSize,1);
    learningCurve(1:K) = mean(testTarget.^2)*ones(K,1);
else
    learningCurve = [];
end
weightVector = zeros(inputDimension,1);
biasTerm = 0;
% training
for n = 1:trainSize-K+1
    networkOutput = trainInput(:,n:n+K-1)'*weightVector + biasTerm;
    aprioriErr = trainTarget(n:n+K-1) - networkOutput;
    weightVector = weightVector + stepSizeWeightVector*trainInput(:,n:n+K-1)*aprioriErr;
    biasTerm = biasTerm + stepSizeBias*sum(aprioriErr);
    if flagLearningCurve
        % testing
        err = testTarget - (testInput'*weightVector + biasTerm);
        learningCurve(n+K-1) = mean(err.^2);
    end
end
return
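% A minimal usage sketch for Func_APA1 (illustrative only; the synthetic data,
% "true" weights, and step sizes below are assumptions, not part of the demos
% later in this listing):
%
%   K = 10; D = 3; N = 1000;
%   X  = randn(D,N);                  % one input vector per column
%   w0 = [1; -0.5; 0.25];             % hypothetical true weights
%   d  = X'*w0 + 0.01*randn(N,1);     % noisy linear targets
%   [w,b,lc] = Func_APA1(K,X,d,X(:,1:100),d(1:100),0.02/K,0.002,1);
%   plot(lc) % the MSE learning curve should decay as w approaches w0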

function [YY,XX] = Func_EKF(Ap,Xint_v,Uk,Qu,Vk,Qv,C,n,Wk,W,V)
% Extended Kalman filter demo on a scalar nonlinear measurement model.
% Ap is rebuilt below into a 5x5 companion-style transition matrix.

Ap(2,:) = 0;
inx = 1;

for ii = 1:length(Ap)-1
    Ap(ii+1,ii) = 1;
end

UUk = [Uk(inx); 0; 0; 0; 0];
PPk = Xint_v*Xint_v';
VVk = [Vk(inx); 0; 0; 0; 0];
Qv = V*V';

for ii = 1:length(Xint_v)
    XKk(ii,1) = Xint_v(ii)^2;
end

PPk = Ap*PPk*Ap';
Kk = PPk*C'/((C*PPk*C') + (V*Qv*V'));

for ii = 1:length(Xint_v)
    XUPK(ii,1) = XKk(ii)^2 + UUk(ii);
    Zk(ii,1) = cos(XUPK(ii)) + VVk(ii);
end

for ii = 1:length(XKk)
    XBARk(ii,1) = XKk(ii) + Kk(ii)*(Zk(ii) - cos(XKk(ii)));
end

II = eye(5,5);
Pk = (II - Kk*C)*PPk;

for ii = 1:n
    UUk = [Uk(ii+1); 0; 0; 0; 0];
    PPk = XBARk*XBARk';
    VVk = [Vk(ii+1); 0; 0; 0; 0];
    XKk = exp(-XBARk);
    PPkM = Ap*PPk*Ap';
    Kk = PPkM*C'/((C*PPkM*C') + (V*Qv*V'));

    for nn = 1:length(XBARk)
        XUPK(nn) = exp(-XKk(nn)) + UUk(nn);
        Zk(nn) = cos(XUPK(nn)) + VVk(nn);
    end

    for in = 1:length(XUPK)
        XNEW(in) = XBARk(in) + Kk(in)*(Zk(in) - cos(XBARk(in)));
    end

    II = eye(5,5);
    Pk = (II - Kk*C)*PPkM;
    XBARk = XNEW;
    OUTX(ii) = XBARk(1,1);
    OUTY(ii) = Zk(1,1);
end

YY = OUTY;
XX = OUTX;

end

function [expansionCoefficient,weightVector,biasTerm,learningCurve] = ...
    Func_KAPA1(K,trainInput,trainTarget,testInput,testTarget,typeKernel,paramKernel,stepSizeFeatureVector,stepSizeWeightVector,stepSizeBias,flagLearningCurve)
% Kernel affine projection algorithm (KAPA-1) with an auxiliary linear term.

[inputDimension,trainSize] = size(trainInput);
testSize = length(testTarget);
expansionCoefficient = zeros(trainSize,1);
networkOutput = zeros(K,1);
weightVector = zeros(inputDimension,1);
biasTerm = 0;
if flagLearningCurve
    learningCurve = zeros(trainSize,1);
    learningCurve(1) = mean(testTarget.^2);
else
    learningCurve = [];
end
expansionCoefficient(1) = stepSizeFeatureVector*trainTarget(1);
% start training: warm-up phase while fewer than K samples are available
for n = 2:K
    ii = 1:n-1;
    networkOutput(K) = expansionCoefficient(ii)'*...
        ker_eval(trainInput(:,n),trainInput(:,ii),typeKernel,paramKernel) + weightVector'*trainInput(:,n) + biasTerm;
    predictionError = trainTarget(n) - networkOutput(K);
    % updating
    weightVector = weightVector + stepSizeWeightVector*trainInput(:,n)*predictionError;
    biasTerm = biasTerm + stepSizeBias*predictionError;
    % updating
    expansionCoefficient(n) = stepSizeFeatureVector*predictionError;
    if flagLearningCurve
        % testing
        y_te = zeros(testSize,1);
        for jj = 1:testSize
            ii = 1:n;
            y_te(jj) = expansionCoefficient(ii)'*...
                ker_eval(testInput(:,jj),trainInput(:,ii),typeKernel,paramKernel) + weightVector'*testInput(:,jj) + biasTerm;
        end
        err = testTarget - y_te;
        learningCurve(n) = mean(err.^2);
    end
end
% main phase: affine projection over the K most recent samples
for n = K+1:trainSize
    % filtering
    ii = 1:n-1;
    for kk = 1:K
        networkOutput(kk) = expansionCoefficient(ii)'*...
            ker_eval(trainInput(:,n+kk-K),trainInput(:,ii),typeKernel,paramKernel) + weightVector'*trainInput(:,n+kk-K) + biasTerm;
    end
    aprioriErr = trainTarget(n-K+1:n) - networkOutput;
    % updating
    weightVector = weightVector + stepSizeWeightVector*trainInput(:,n-K+1:n)*aprioriErr;
    biasTerm = biasTerm + stepSizeBias*sum(aprioriErr);
    expansionCoefficient(n-K+1:n) = expansionCoefficient(n-K+1:n) + stepSizeFeatureVector*aprioriErr;
    if flagLearningCurve
        % testing
        y_te = zeros(testSize,1);
        for jj = 1:testSize
            ii = 1:n;
            y_te(jj) = expansionCoefficient(ii)'*...
                ker_eval(testInput(:,jj),trainInput(:,ii),typeKernel,paramKernel) + weightVector'*testInput(:,jj) + biasTerm;
        end
        err = testTarget - y_te;
        learningCurve(n) = mean(err.^2);
    end
end
return

function [x_aposteriori,P_aposteriori] = Func_KF(z,Q,R,x_aposteriori_last,P_aposteriori_last)
% One step of a scalar Kalman filter for the random-walk model
% x(k) = x(k-1) + w, z(k) = x(k) + v, with var(w) = Q and var(v) = R.

x_apriori = x_aposteriori_last;
P_apriori = P_aposteriori_last + Q;
K = P_apriori / (P_apriori + R);
x_aposteriori = x_apriori + K * (z - x_apriori);
P_aposteriori = (eye(length(x_aposteriori)) - K) * P_apriori;

end
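% A worked scalar example of one Func_KF step (the numbers are illustrative
% assumptions): with P_last = 1, Q = 1, R = 1, the predicted covariance is
% P_apriori = 2 and the gain is K = 2/3, so a measurement z = 3 against a
% prior x_last = 0 gives x_aposteriori = 2 and P_aposteriori = 2/3:
%
%   [xp, Pp] = Func_KF(3, 1, 1, 0, 1);   % xp = 2.0, Pp = 0.6667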

 

function [expansionCoefficient,learningCurve] = ...
    Func_KRLS(trainInput,trainTarget,testInput,testTarget,typeKernel,paramKernel,regularizationFactor,forgettingFactor,flagLearningCurve)
% Kernel recursive least squares (KRLS). Q_matrix tracks the inverse of the
% regularized kernel (Gram) matrix and is grown by one row/column per sample.
[inputDimension,trainSize] = size(trainInput);
testSize = length(testTarget);
expansionCoefficient = zeros(trainSize,1);
if flagLearningCurve
    learningCurve = zeros(trainSize,1);
    learningCurve(1) = mean(testTarget.^2);
else
    learningCurve = [];
end

Q_matrix = 1/(forgettingFactor*regularizationFactor + ker_eval(trainInput(:,1),trainInput(:,1),typeKernel,paramKernel));
expansionCoefficient(1) = Q_matrix*trainTarget(1);
% start training
for n = 2:trainSize
    ii = 1:n-1;
    k_vector = ker_eval(trainInput(:,n),trainInput(:,ii),typeKernel,paramKernel);
    f_vector = Q_matrix*k_vector;
    s = 1/(regularizationFactor*forgettingFactor^n + ker_eval(trainInput(:,n),trainInput(:,n),typeKernel,paramKernel) - k_vector'*f_vector);
    Q_tmp = zeros(n,n);
    Q_tmp(ii,ii) = Q_matrix + f_vector*f_vector'*s;
    Q_tmp(ii,n) = -f_vector*s;
    Q_tmp(n,ii) = Q_tmp(ii,n)';
    Q_tmp(n,n) = s;
    Q_matrix = Q_tmp;
    predictionError = trainTarget(n) - k_vector'*expansionCoefficient(ii);
    % updating
    expansionCoefficient(n) = s*predictionError;
    expansionCoefficient(ii) = expansionCoefficient(ii) - f_vector*expansionCoefficient(n);

    if flagLearningCurve
        % testing
        y_te = zeros(testSize,1);
        for jj = 1:testSize
            ii = 1:n;
            y_te(jj) = expansionCoefficient(ii)'*...
                ker_eval(testInput(:,jj),trainInput(:,ii),typeKernel,paramKernel);
        end
        err = testTarget - y_te;
        learningCurve(n) = mean(err.^2);
    end
end
return
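% A sanity-check sketch (illustrative; assumes forgettingFactor = 1, in which
% case the recursion above maintains alpha = inv(reg*I + G)*y for the Gram
% matrix G built by gramMatrix below):
%
%   X = randn(2,50); y = randn(50,1); reg = 0.1;
%   alpha = Func_KRLS(X,y,X(:,1:5),y(1:5),'Gauss',1,reg,1,0);
%   G = gramMatrix(X,'Gauss',1);
%   norm(alpha - (reg*eye(50) + G)\y)   % should be close to zero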

function G = gramMatrix(data,typeKernel,paramKernel)
% Symmetric kernel (Gram) matrix over the columns of data.
[inputDimension,dataSize] = size(data);
G = zeros(dataSize,dataSize);

for ii = 1:dataSize
    jj = ii:dataSize;
    G(ii,jj) = ker_eval(data(:,ii),data(:,jj),typeKernel,paramKernel);
    G(jj,ii) = G(ii,jj);
end
return
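% Example (illustrative): a 3x3 Gaussian Gram matrix has a unit diagonal,
% since k(x,x) = exp(0) = 1 for the 'Gauss' kernel used here:
%
%   G = gramMatrix(randn(2,3),'Gauss',1);   % 3x3, symmetric, diag(G) == 1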

function y = ker_eval(X1,X2,ker_type,ker_param)
% Evaluate a kernel between the columns of X1 and X2.
% 'Gauss': k(x1,x2) = exp(-ker_param*||x1-x2||^2)
% 'Poly' : k(x1,x2) = (1 + x1'*x2)^ker_param
N1 = size(X1,2);
N2 = size(X2,2);
if strcmp(ker_type,'Gauss')
    if N1 == N2
        y = (exp(-sum((X1-X2).^2)*ker_param))';
    elseif N1 == 1
        y = (exp(-sum((X1*ones(1,N2)-X2).^2)*ker_param))';
    elseif N2 == 1
        y = (exp(-sum((X1-X2*ones(1,N1)).^2)*ker_param))';
    else
        warning('ker_eval: dimension mismatch')
    end
end
if strcmp(ker_type,'Poly')
    if N1 == N2
        y = ((1 + sum(X1.*X2)).^ker_param)';
    elseif N1 == 1
        y = ((1 + X1'*X2).^ker_param)';
    elseif N2 == 1
        y = ((1 + X2'*X1).^ker_param)';
    else
        warning('ker_eval: dimension mismatch')
    end
end
return
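% Quick check of the Gaussian branch (illustrative): identical vectors give
% k = 1, and the value decays with squared distance:
%
%   x = [1;2];
%   ker_eval(x,x,'Gauss',1)          % returns 1
%   ker_eval(x,[1;3],'Gauss',1)      % returns exp(-1), about 0.3679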

function out = nlG(input,param,flag)
% Memoryless nonlinearities used to distort the channel output.

switch flag
    case 0 % linear gain
        out = param*input;
    case 1 % tanh-like saturation
        out = (1-exp(-param*input))./(1+exp(-param*input));
    case 2 % linear + quadratic mix
        out = (1-param)*input + param*input.^2;
    case 3 % sinusoidal
        out = sin(param*input);
    case 4 % threshold cut-off (hard clipping at +/-param)
        out = input;
        out(out > param) = param;
        out(out < -param) = -param;
    otherwise
        warning('nlG: unknown flag');
end
return
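% For instance, flag 1 is algebraically tanh(param*input/2) (illustrative check):
%
%   t = linspace(-3,3,7);
%   max(abs(nlG(t,2,1) - tanh(t)))   % ~ 0 up to rounding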

 
function r = rayleigh(T_s,Ns,F_d)
% Generates a Rayleigh fading channel (sum-of-scatterers model)
%
% T_s: symbol period
% Ns : number of symbols
% F_d: maximum Doppler spread. It can be a scalar or a vector.
%      If F_d is a scalar, it corresponds to a constant mobile speed over
%      the simulation time. If F_d is a vector of length Ns, it corresponds
%      to a mobile speed that varies over the simulation time.

N = 20; % assumed number of scatterers

if max(size(F_d)) == 1
    f = (T_s*F_d)*(0:Ns-1);
else
    f = (T_s*F_d).*(0:Ns-1);
end

phi = (2*pi)*rand(1,N);                      % random scatterer phases
C = (randn(1,N) + 1i*randn(1,N))/sqrt(2*N);  % complex gains, unit total power
r = zeros(1,Ns);
for jj = 1:N
    r = r + exp(1i*2*pi*cos(phi(jj))*f)*C(jj);
end
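% Usage sketch (illustrative parameters): a 1000-sample fading trace whose
% average power E|r|^2 should be close to 1 by construction of C above:
%
%   r = rayleigh(0.8e-6, 1000, 100);   % 0.8 us symbols, 100 Hz Doppler
%   mean(abs(r).^2)                    % ~ 1
%   plot(20*log10(abs(r)))             % envelope in dB, showing deep fades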

 

% Start Ray_KF
clc; clear; close all;
rng(1); % fixed nonnegative integer seed for reproducibility
Q = 10; % process-noise variance; also reused below as the maximum Doppler frequency in Hz
R = 3;  % measurement-noise variance; also reused below as the symbol frequency in ksps
EWMALength = 15;
lookbackWindow = 30;
UseTrueVariances = 'true';
N = 200;
ep = 0.000001; % epsilon regularizer for the Rayleigh channel

% ********************************
% state_variable_initializations
% ********************************
x = zeros(1,N);
x_apriori = zeros(1,N);
x_aposteriori = zeros(1,N);
P_apriori = zeros(1,N);
P_aposteriori = zeros(1,N);

% ********************************
% measurements
% ********************************
z = zeros(1,N);
smoothed_z = zeros(1,N);
K = zeros(1,N);

% ********************************
% initializations
% ********************************
x(1) = rand;
lambda = 1-2/(EWMALength+1);
z(1) = x(1) + sqrt(R)*randn; % first measurement uses the measurement-noise variance R
smoothed_z(1) = z(1);
x_aposteriori(1:lookbackWindow) = .5;
P_aposteriori(1:lookbackWindow) = 1;

% ********************************
% Rayleigh channel
% ********************************
fm = Q;
fs = R;
P = N;
M = 200;
vector_corr = zeros(1,P+1);
for p = 1:P+1
    vector_corr(p) = besselj(0,2*pi*fm*(p-1)/(fs*1000)); % Bessel autocorrelation function according to Jakes' model
end
auto_correlation_matrix = toeplitz(vector_corr(1:P)) + eye(P)*ep; % add a small bias, epsilon, to overcome the ill-conditioning of the Yule-Walker equations
AR_parameters = -(auto_correlation_matrix\vector_corr(2:P+1)'); % solve the Yule-Walker equations for the AR model parameters
sigma_u = auto_correlation_matrix(1,1) + vector_corr(2:P+1)*AR_parameters; % driving-noise variance
KKK = 2000;
h = filter(1,[1 AR_parameters.'],wgn(M+KKK,1,10*log10(sigma_u),'complex')); % use filter to generate the channel coefficients
RC = h(KKK+1:end,:); % ignore the first KKK samples (filter transient)
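% Sanity-check sketch (illustrative; xcorr requires the Signal Processing
% Toolbox): the sample autocorrelation of RC should roughly track the Jakes
% model J0(2*pi*fm*tau/(fs*1000)) used to build vector_corr above:
%
%   ac = xcorr(RC,50,'coeff');
%   plot(0:50, real(ac(51:end)), 0:50, besselj(0,2*pi*fm*(0:50)/(fs*1000)))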
% ********************************
% performing_kalman_filtering
% ********************************
for i = 2:N
    x(i) = x(i-1) + sqrt(Q)*randn;
    z(i) = x(i) + sqrt(R)*randn;
    smoothed_z(i) = lambda*smoothed_z(i-1) + (1-lambda)*z(i);
    if (i >= lookbackWindow)
        % ************************************
        % estimating_noise/process_covariance
        % ************************************
        if strcmpi(UseTrueVariances,'true')
            Ri = R;
            Qi = Q;
        else
            [Ri,Qi] = StandardCovEst(z,smoothed_z,i,lookbackWindow);
        end
        [x_aposteriori(i),P_aposteriori(i)] = Func_KF(z(i),Qi,Ri,x_aposteriori(i-1),P_aposteriori(i-1));
    end
end
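% StandardCovEst is called above but is not included in this listing (it is
% only reached when UseTrueVariances is not 'true'). A minimal stand-in
% consistent with the call signature (an assumption, not the original helper)
% could estimate R from the spread of z around its EWMA and Q from the
% innovations of the smoothed series over the lookback window:
%
%   function [R_est,Q_est] = StandardCovEst(z,smoothed_z,i,lookbackWindow)
%   win = i-lookbackWindow+1:i;             % trailing window of samples
%   R_est = var(z(win) - smoothed_z(win));  % measurement-noise variance
%   Q_est = var(diff(smoothed_z(win)));     % process-noise variance
%   end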

% ********************************
% plotting_results
% ********************************
figure
r = plot(lookbackWindow:N,x_aposteriori(lookbackWindow:N),'b');
title('Kalman Filtering With Rayleigh Fading');
axis tight
xlabel('Time')
ylabel('Value')
grid on
drawnow;
% End Ray_KF

% Start Ray_EKF
clc; clear;
rng(1) % fixed nonnegative integer seed for reproducibility

Xint_v = [1; 0; 0; 0; 0];
wk = [1 0 0 0 0];
vk = [1 0 0 0 0];

for ii = 1:length(Xint_v)
    Ap(ii) = Xint_v(ii)*2;
    W(ii) = 0;
    H(ii) = -sin(Xint_v(ii));
    V(ii) = 0;
    Wk(ii) = 0;
end

Uk = randn(1,200);
Qu = cov(Uk);
Vk = randn(1,200);
Qv = cov(Vk);
C = [1 0 0 0 0];
n = 100;

[YY,XX] = Func_EKF(Ap,Xint_v,Uk,Qu,Vk,Qv,C,n,Wk,W,V);

MSE = zeros(1,length(XX));
for it = 1:length(XX)
    MSE(it) = YY(it) - XX(it); % per-sample estimation error (squared for plotting below)
end
tt = 1:length(XX);

P = n;
M = length(XX);                      % number of channel samples to keep
fm = length(Xint_v)*10 + length(XX); % Doppler frequency in Hz
fs = length(Xint_v);                 % symbol frequency in ksps

vector_corr = zeros(1,P+1);
for p = 1:P+1
    vector_corr(p) = besselj(0,2*pi*fm*(p-1)/(fs*1000)); % Bessel autocorrelation function according to Jakes' model
end

auto_correlation_matrix = toeplitz(vector_corr(1:P)) + eye(P)*0.00000001; % add a small bias, epsilon, to overcome the ill-conditioning of the Yule-Walker equations
AR_parameters = -(auto_correlation_matrix\vector_corr(2:P+1)'); % solve the Yule-Walker equations for the AR model parameters
sigma_u = auto_correlation_matrix(1,1) + vector_corr(2:P+1)*AR_parameters; % driving-noise variance

KKK = 2000;

h = filter(1,[1 AR_parameters.'],wgn(M+KKK,1,10*log10(sigma_u),'complex')); % use filter to generate the channel coefficients
chann = h(KKK+1:end,:); % ignore the first KKK samples (filter transient)

figure
plot((MSE' + abs(chann)).^2,'b');
grid on;
axis tight
title('Rayleigh Extended Kalman Filtering');
xlabel('Time')
ylabel('Value')
drawnow;
% End Ray_EKF

% Start Ray_APA
clear
clc
rng(0) % fixed nonnegative integer seed for reproducibility
sampleInterval = 0.8e-6; % symbol period in seconds (1.25 MHz sampling rate)
numberSymbol = 3000;
dopplerFrequency = 100; % Doppler frequency in Hz
trainSize = numberSymbol;
L = 10; % number of Monte Carlo runs
channelLength = 3;
channel = zeros(channelLength,trainSize);
% initializing
inputDimension = 3;
equalizationDelay = 0;
var_n = 1e-3;
sqn = sqrt(var_n);
% ******************************
% Parameters
typeNonlinear = 1;
paramNonlinear = 2;
flagLearningCurve = 1;
K = 10;
stepSizeWeightApa1 = .02/K;
stepSizeBiasApa1 = .002;
% data size
trainSize = 3000;
testSize = 100;
noise_std = 0.1;
% *****************************
errNumApa1 = zeros(L,1);
ensembleLearningCurveApa1 = zeros(trainSize,1);
for k = 1:L
    for i = 1:channelLength
        channel(i,:) = rayleigh(sampleInterval,numberSymbol,dopplerFrequency);
    end
    channel = real(channel);
    % Input signal
    inputSignal = randn(1,trainSize + channelLength);
    noise = sqn*randn(1,trainSize);
    % Input training signal with data embedding
    trainInput = zeros(inputDimension,trainSize);
    for kk = 1:trainSize
        trainInput(:,kk) = inputSignal(kk:kk+inputDimension-1);
    end
    % Desired training signal
    trainTarget = zeros(trainSize,1);
    for ii = 1:trainSize
        trainTarget(ii) = trainInput(:,ii)'*channel(:,ii);
    end
    trainTarget = trainTarget + noise';
    % Pass through the nonlinearity
    trainTarget = nlG(trainTarget,paramNonlinear,typeNonlinear);
    testInput = zeros(inputDimension,testSize);

    u = sign(randn(1,testSize*2));
    z = filter([1 0.5],1,u);
    % Channel noise
    ns = noise_std*randn(1,length(z));
    % Output of the channel
    y = z - 0.9*z.^2 + ns;
    for kk = 1:testSize
        testInput(:,kk) = y(kk:kk+inputDimension-1)';
    end
    testTarget = zeros(testSize,1);
    for ii = 1:testSize
        testTarget(ii) = u(equalizationDelay+ii);
    end
    % APA
    [weightVectorApa1,biasTermApa1,learningCurveApa1] = ...
        Func_APA1(K,trainInput,trainTarget,testInput,testTarget,stepSizeWeightApa1,stepSizeBiasApa1,flagLearningCurve);
    errNumApa1(k) = sum(testTarget ~= sign(testInput'*weightVectorApa1 + biasTermApa1));
    ensembleLearningCurveApa1 = ensembleLearningCurveApa1 + learningCurveApa1;
end
figure
plot(ensembleLearningCurveApa1/L,'b');
title('APA on Rayleigh Channel')
xlabel('iteration'),ylabel('MSE')
grid on
drawnow;
% End Ray_APA

% Start Ray_KAPA & KRLS
clear
clc
rng(0) % fixed nonnegative integer seed for reproducibility
var_n = 1e-3;
sqn = sqrt(var_n);
sampleInterval = 0.8e-6; % symbol period in seconds (1.25 MHz sampling rate)
numberSymbol = 3000;
dopplerFrequency = 100; % Doppler frequency in Hz
trainSize = numberSymbol;
L = 10; % number of Monte Carlo runs
channelLength = 3;
channel = zeros(channelLength,trainSize);
inputDimension = 3;
load MK30 % Mackey-Glass time series (MK30.mat must be on the path)
MK30 = MK30(1000:5000);
varNoise = 0.001;
% sizes for the data to train and test
trainSize = 500;
testSize = 100;
inputSignal = MK30 + sqrt(varNoise)*randn(size(MK30));
inputSignal = inputSignal - mean(inputSignal);
noise = sqn*randn(1,trainSize);
% prediction
testInput = zeros(inputDimension,testSize);
for k = 1:testSize
    testInput(:,k) = inputSignal(k+trainSize:k+inputDimension-1+trainSize);
end
predictionHorizon = 1;
% Input training signal with data embedding
trainInput = zeros(inputDimension,trainSize);
for kk = 1:trainSize
    trainInput(:,kk) = inputSignal(kk:kk+inputDimension-1);
end
% Kernel parameters
typeKernel = 'Gauss';
paramKernel = 1;
% test signal
testTarget = zeros(testSize,1);
for ii = 1:testSize
    testTarget(ii) = inputSignal(ii+inputDimension+trainSize+predictionHorizon-1);
end
for k = 1:L
    % disp(k)
    for i = 1:channelLength
        channel(i,:) = rayleigh(sampleInterval,numberSymbol,dopplerFrequency);
    end
    channel = real(channel);
    % Desired training signal: channel output for each embedded input vector
    trainTarget = zeros(trainSize,1);
    for ii = 1:trainSize
        trainTarget(ii) = trainInput(:,ii)'*channel(:,ii);
    end
    K = 10;
    % KAPA
    stepSizeKapa1 = .04;
    stepSizeWeightKapa1 = 0;
    stepSizeBiasKapa1 = 0;
    flagLearningCurve = 1;
    [expansionCoeffKapa1,weightVectorKapa1,biasTermKapa1,learningCurveKapa1] = ...
        Func_KAPA1(K,trainInput,trainTarget,testInput,testTarget,typeKernel,paramKernel,stepSizeKapa1,stepSizeWeightKapa1,stepSizeBiasKapa1,flagLearningCurve);
    % *************************

    % KRLS
    regularizationFactorKrls = 0.1;
    forgettingFactorKrls = 1;
    [expansionCoefficientKrls,learningCurveKrls] = ...
        Func_KRLS(trainInput,trainTarget,testInput,testTarget,typeKernel,paramKernel,regularizationFactorKrls,forgettingFactorKrls,flagLearningCurve);
    % *************************
end
figure
plot(learningCurveKapa1,'b');
title('KAPA on Rayleigh Channel')
xlabel('iteration'),ylabel('MSE')
set(gca,'YScale','log')
grid on
drawnow;

figure
plot(learningCurveKrls,'b')
title('KRLS on Rayleigh Channel')
xlabel('iteration'),ylabel('MSE')
grid on
set(gca,'YScale','log')
drawnow;
% End Ray_KAPA & KRLS
% End Ray_KAPA & KRLS

 



%% Start APA
clear
close all
clc
% initializing
inputDimension = 3;
equalizationDelay = 0;
% ******************************
% Parameters
flagLearningCurve = 1;
K = 10;
stepSizeWeightApa1 = .02/K;
stepSizeBiasApa1 = .002;
% data size
trainSize = 10000;
testSize = 100;
noise_std = 0.1;
% *****************************
MC = 10;
errNumApa1 = zeros(MC,1);
ensembleLearningCurveApa1 = zeros(trainSize,1);
for mc = 1:MC
    % Data Formatting
    u = sign(randn(1,trainSize*1.5));
    z = filter([1 0.5],1,u);
    ns = noise_std*randn(1,length(z));
    y = z - 0.9*z.^2 + ns;
    trainInput = zeros(inputDimension,trainSize);
    for k = 1:trainSize
        trainInput(:,k) = y(k:k+inputDimension-1)';
    end
    trainTarget = zeros(trainSize,1);
    for ii = 1:trainSize
        trainTarget(ii) = u(equalizationDelay+ii);
    end
    u = sign(randn(1,testSize*2));
    z = filter([1 0.5],1,u);
    ns = noise_std*randn(1,length(z));
    y = z - 0.9*z.^2 + ns;
    testInput = zeros(inputDimension,testSize);
    for k = 1:testSize
        testInput(:,k) = y(k:k+inputDimension-1)';
    end
    testTarget = zeros(testSize,1);
    for ii = 1:testSize
        testTarget(ii) = u(equalizationDelay+ii);
    end
    % *****************************
    % APA
    [weightVectorApa1,biasTermApa1,learningCurveApa1] = ...
        Func_APA1(K,trainInput,trainTarget,testInput,testTarget,stepSizeWeightApa1,stepSizeBiasApa1,flagLearningCurve);
    errNumApa1(mc) = sum(testTarget ~= sign(testInput'*weightVectorApa1 + biasTermApa1));
    ensembleLearningCurveApa1 = ensembleLearningCurveApa1 + learningCurveApa1;
end
figure
plot(ensembleLearningCurveApa1/MC,'b')
xlabel('Iteration'),ylabel('MSE')
title('Affine Projection Algorithm ~ APA')
grid on
drawnow;
% End APA

%% Start KAPA & KRLS
clear
clc
% Data Formatting
load MK30 % Mackey-Glass time series (MK30.mat must be on the path)
MK30 = MK30(1000:5000);
varNoise = 0.001;
inputDimension = 7;
% sizes for the data to train and test
trainSize = 500;
testSize = 100;
inputSignal = MK30 + sqrt(varNoise)*randn(size(MK30));
inputSignal = inputSignal - mean(inputSignal);
% Input train data
trainInput = zeros(inputDimension,trainSize);
for k = 1:trainSize
    trainInput(:,k) = inputSignal(k:k+inputDimension-1);
end
% Input test data
testInput = zeros(inputDimension,testSize);
for k = 1:testSize
    testInput(:,k) = inputSignal(k+trainSize:k+inputDimension-1+trainSize);
end
% prediction
predictionHorizon = 1;
% train signal
trainTarget = zeros(trainSize,1);
for ii = 1:trainSize
    trainTarget(ii) = inputSignal(ii+inputDimension+predictionHorizon-1);
end
% test signal
testTarget = zeros(testSize,1);
for ii = 1:testSize
    testTarget(ii) = inputSignal(ii+inputDimension+trainSize+predictionHorizon-1);
end
% Kernel parameters
typeKernel = 'Gauss';
paramKernel = 1;

K = 10;
% KAPA
stepSizeKapa1 = .04;
stepSizeWeightKapa1 = 0;
stepSizeBiasKapa1 = 0;
flagLearningCurve = 1;
[expansionCoeffKapa1,weightVectorKapa1,biasTermKapa1,learningCurveKapa1] = ...
    Func_KAPA1(K,trainInput,trainTarget,testInput,testTarget,typeKernel,paramKernel,stepSizeKapa1,stepSizeWeightKapa1,stepSizeBiasKapa1,flagLearningCurve);
% *************************

% KRLS
regularizationFactorKrls = 0.1;
forgettingFactorKrls = 1;
[expansionCoefficientKrls,learningCurveKrls] = ...
    Func_KRLS(trainInput,trainTarget,testInput,testTarget,typeKernel,paramKernel,regularizationFactorKrls,forgettingFactorKrls,flagLearningCurve);
% *************************
figure
plot(learningCurveKapa1,'b')
title('Kernel Affine Projection Algorithm ~ KAPA')
xlabel('Iteration'),ylabel('MSE')
set(gca,'YScale','log')
grid on
drawnow;

figure
plot(learningCurveKrls,'b')
title('Kernel Recursive Least Squares ~ KRLS')
xlabel('Iteration'),ylabel('MSE')
grid on
set(gca,'YScale','log')
drawnow;
% End KAPA & KRLS

%% Start KF
clc; clear;
rng(1); % fixed nonnegative integer seed for reproducibility

Q = 1; % process-noise variance
R = 0; % measurement-noise variance (noise-free measurements)
EWMALength = 15;
lookbackWindow = 30;
UseTrueVariances = 'true';
N = 200;

% ********************************
% state_variable_initializations
% ********************************
x = zeros(1,N);
x_apriori = zeros(1,N);
x_aposteriori = zeros(1,N);
P_apriori = zeros(1,N);
P_aposteriori = zeros(1,N);

% ********************************
% measurements
% ********************************
z = zeros(1,N);
smoothed_z = zeros(1,N);
K = zeros(1,N);

% ********************************
% initializations
% ********************************
x(1) = rand;
lambda = 1-2/(EWMALength+1);
z(1) = x(1) + sqrt(R)*randn; % first measurement uses the measurement-noise variance R
smoothed_z(1) = z(1);
x_aposteriori(1:lookbackWindow) = .5;
P_aposteriori(1:lookbackWindow) = 1;

% ********************************
% performing_kalman_filtering
% ********************************
for i = 2:N
    x(i) = x(i-1) + sqrt(Q)*randn;
    z(i) = x(i) + sqrt(R)*randn;
    smoothed_z(i) = lambda*smoothed_z(i-1) + (1-lambda)*z(i);
    if (i >= lookbackWindow)
        % ************************************
        % estimating_noise/process_covariance
        % ************************************
        if strcmpi(UseTrueVariances,'true')
            Ri = R;
            Qi = Q;
        else
            [Ri,Qi] = StandardCovEst(z,smoothed_z,i,lookbackWindow);
        end
        [x_aposteriori(i),P_aposteriori(i)] = Func_KF(z(i),Qi,Ri,x_aposteriori(i-1),P_aposteriori(i-1));
    end
end

% ********************************
% plotting_results
% ********************************
figure
r = plot(lookbackWindow:N,x_aposteriori(lookbackWindow:N),'b');
title('Kalman Filtering');
axis tight
xlabel('Time')
ylabel('Value')
grid on
drawnow;
% End KF

%% Start EKF
clc; clear;
rng(1) % fixed nonnegative integer seed for reproducibility

Xint_v = [1; 0; 0; 0; 0];
wk = [1 0 0 0 0];
vk = [1 0 0 0 0];

for ii = 1:length(Xint_v)
    Ap(ii) = Xint_v(ii)*2;
    W(ii) = 0;
    H(ii) = -sin(Xint_v(ii));
    V(ii) = 0;
    Wk(ii) = 0;
end

Uk = randn(1,200);
Qu = cov(Uk);
Vk = randn(1,200);
Qv = cov(Vk);
C = [1 0 0 0 0];
n = 100;

[YY,XX] = Func_EKF(Ap,Xint_v,Uk,Qu,Vk,Qv,C,n,Wk,W,V);

MSE = zeros(1,length(XX));
for it = 1:length(XX)
    MSE(it) = YY(it) - XX(it); % per-sample estimation error (squared for plotting below)
end
tt = 1:length(XX);

figure
plot(MSE.^2,'b');
grid on;
axis tight
title('Extended Kalman Filtering');
xlabel('Time')
ylabel('Value')
drawnow;
% End EKF

%% ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ %%

clc
clear
load data % saved curve data: each figN_M array holds the [x y] columns for curve M of figure N

%1
figure
a = fig1_1(:,1);
b = fig1_1(:,2);
plot(a,b,'b')
grid on
axis([0 2 -20 0])
title('MSD vs \mu for NLMS')
xlabel('\mu')
ylabel('MSD (dB)')
drawnow;

%2
figure
subplot(2,1,1)
a = fig2_1(:,1);
b = fig2_1(:,2);
plot(a,b,'r')

hold on
a = fig2_2(:,1);
b = fig2_2(:,2);
plot(a,b,'g')

hold on
a = fig2_3(:,1);
b = fig2_3(:,2);
plot(a,b,'b')
axis([0 100 -1 1.5])
grid on
title('Mean of confidence level vs Time ~ NLMS algorithm')
xlabel('n')
ylabel('E(w)')
legend('SNR=10dB','SNR=5dB','SNR=-23dB')

subplot(2,1,2)
a = fig7_1(:,1);
b = fig7_1(:,2);
plot(a,b,'r')

hold on
a = fig7_2(:,1);
b = fig7_2(:,2);
plot(a,b,'g')

hold on
a = fig7_3(:,1);
b = fig7_3(:,2);
plot(a,b,'b')
axis([0 100 -1 1.5])
grid on

title('Mean of confidence level vs Time ~ RLS algorithm')
xlabel('n')
ylabel('E(w)')
legend('SNR=10dB','SNR=5dB','SNR=-23dB')
drawnow;

%3
figure
subplot(2,1,1)
a = fig3_1(:,1);
b = fig3_1(:,2);
plot(a,b,'b-o')
hold on

a = fig3_2(:,1);
b = fig3_2(:,2);
plot(a,b,'r-x')
hold on

a = fig3_3(:,1);
b = fig3_3(:,2);
plot(a,b,'g-o')
axis([0 100 0 1])
grid on

title('Performance of proposed approach: Qd vs Time')
xlabel('n')
ylabel('Qd')
legend('OR rule','NLMS','RLS')

subplot(2,1,2)
a = fig8_1(:,1);
b = fig8_1(:,2);
plot(a,b,'b-o')
hold on

a = fig8_2(:,1);
b = fig8_2(:,2);
plot(a,b,'r-x')
hold on

a = fig8_3(:,1);
b = fig8_3(:,2);
plot(a,b,'g-o')
axis([0 100 0.00001 1])
grid on

title('Performance of proposed approach: Qf vs Time')
xlabel('n')
ylabel('Qf')
% legend('OR rule','NLMS','RLS')
drawnow;

%4
figure
a = fig4_1(:,1);
b = fig4_1(:,2);
plot(a,b,'r-*')
hold on

a = fig4_2(:,1);
b = fig4_2(:,2);
plot(a,b,'m--')
hold on

a = fig4_3(:,1);
b = fig4_3(:,2);
plot(a,b,'go--')
hold on

a = fig4_4(:,1);
b = fig4_4(:,2);
plot(a,b,'ks-')
hold on

a = fig4_5(:,1);
b = fig4_5(:,2);
plot(a,b,'b-d')
axis([0.0001 9.5 0.001 10])
grid on
title('ROC curves')
xlabel('Qf')
ylabel('Qd')
legend('Proposed approach','P.K. Varshney rule','OR rule','Majority rule','AND rule')
drawnow;

%5
figure
a = fig5_1(:,1);
b = fig5_1(:,2);
plot(a,b,'r-x')
hold on

a = fig5_2(:,1);
b = fig5_2(:,2);
plot(a,b,'b-o')
hold on

a = fig5_3(:,1);
b = fig5_3(:,2);
plot(a,b,'b--')
axis([0 200 0 1])
grid on

title('PU status changes')
xlabel('n')
ylabel('Qf/Qd')
legend('NLMS, \mu = 1','RLS, \lambda = 0.98','RLS, \lambda = 1')
drawnow;

%6
figure
a = fig6_1(:,1);
b = fig6_1(:,2);
plot(a,b,'b--')
hold on

a = fig6_2(:,1);
b = fig6_2(:,2);
plot(a,b,'b-o')
hold on

a = fig6_3(:,1);
b = fig6_3(:,2);
plot(a,b,'r-x')
axis([0 200 -0.5 1])
grid on

title('Received SNR at 1st SU changes')
xlabel('n')
ylabel('W_{1,n}')
legend('RLS, \lambda = 1','RLS, \lambda = 0.9','NLMS, \mu = 1')
drawnow;
