% Task 2: ELM network
% Date: 2021-10-15
% Author: Zhao-Jichao
clear
clc
%% Training
% Load the dataset
data = [760,4550,4550,6300]; % input data (one sample, four features)
label = [500, 500, 880]; % teacher (target) outputs
[N,n] = size(data); % N samples, n input features
L = 7; % number of hidden-layer nodes
m = 3; % number of output classes (not used below)
% Initialize the weight and bias matrices
W = rand(n,L)*2-1; % rand draws from (0,1); *2-1 rescales to (-1,1)
b_1 = rand(1,L); % random bias vector
ind = ones(N,1); b = b_1(ind,:); % replicate b into an N*L matrix (same as repmat) for the addition below
H = G(data*W+b); % hidden-layer output matrix H
beta = pinv(H)*label; % least-squares solution for the output weights beta
output = H * beta; % actual network output on the training data
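% Optional sanity check (an addition, not in the original post): beta is the
% minimum-norm least-squares solution of H*beta = label, so with more hidden
% nodes than training samples the training error should be near machine precision.
trainErr = norm(output - label) % expect a value close to 0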
%% Validation
validateData = [4580 6000 1290 3960]; % one unseen input sample
G(validateData*W+b) * beta % predicted outputs for the validation sample
%% Activation function definition
function out = G(in)
out = 1./(1 + exp(-in)); % sigmoid activation
% out = in; % linear activation (alternative)
end
%% I. Clear environment variables
% (The following is a second, standalone script: predicting octane numbers from NIR spectra.)
clear
clc
%% II. Generate the training and test sets
% 1. Load the data
load spectra_data.mat
% 2. Randomly split into training and test sets
temp = randperm(size(NIR,1)); % randperm: a random permutation of the sample indices
% Training set: 50 samples
P_train = NIR(temp(1:50),:)'; % 401x50
T_train = octane(temp(1:50),:)'; % 1x50
% Test set: 10 samples
P_test = NIR(temp(51:end),:)';
T_test = octane(temp(51:end),:)';
% The random split gives a fairer estimate of generalization performance
%% III. Data normalization
% 1. Inputs (fit the mapping on the training set, then apply it to the test set)
[Pn_train,inputps] = mapminmax(P_train);
Pn_test = mapminmax('apply',P_test,inputps);
% 2. Outputs
[Tn_train,outputps] = mapminmax(T_train);
Tn_test = mapminmax('apply',T_test,outputps);
%% IV. Create and train the ELM
[IW,B,LW,TF,TYPE] = elmtrain(Pn_train,Tn_train,300,'sig',0); % 300 hidden neurons, sigmoid, regression
%% V. ELM simulation test
tn_sim = elmpredict(Pn_test,IW,B,LW,TF,TYPE);
% 1. Reverse the output normalization
T_sim = mapminmax('reverse',tn_sim,outputps);
%% VI. Compare results
result = [T_test' T_sim'];
% 1. Mean squared error
E = mse(T_sim - T_test);
% 2. Coefficient of determination (squared Pearson correlation)
N = length(T_test);
R2 = (N*sum(T_sim.*T_test) - sum(T_sim)*sum(T_test))^2 / ...
     ((N*sum(T_sim.^2) - sum(T_sim)^2) * (N*sum(T_test.^2) - sum(T_test)^2));
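% Optional cross-check (a sketch, not in the original post): this R2 is the
% squared Pearson correlation between predictions and targets, so corrcoef
% should reproduce it up to rounding.
C = corrcoef(T_sim, T_test);
R2_check = C(1,2)^2; % should match R2 above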
%% VII. Plot
figure(1)
plot(1:N,T_test,'r-*',1:N,T_sim,'b:o')
grid on
legend('True value','Predicted value')
xlabel('Sample index')
ylabel('Octane number')
string = {'Test-set octane prediction (ELM)';['(mse = ' num2str(E) ' R^2 = ' num2str(R2) ')']};
title(string)
%%
function [IW,B,LW,TF,TYPE] = elmtrain(P,T,N,TF,TYPE)
% ELMTRAIN Create and train an Extreme Learning Machine
% Syntax
% [IW,B,LW,TF,TYPE] = elmtrain(P,T,N,TF,TYPE)
% Description
% Input
% P    - Input Matrix of Training Set (R*Q)
% T    - Output Matrix of Training Set (S*Q)
% N    - Number of Hidden Neurons (default = Q)
% TF   - Transfer Function:
%        'sig' for Sigmoidal function (default)
%        'sin' for Sine function
%        'hardlim' for Hardlim function
% TYPE - Regression (0, default) or Classification (1)
% Output
% IW - Input Weight Matrix (N*R)
% B  - Bias Matrix (N*1)
% LW - Layer Weight Matrix (N*S)
% Example
% Regression:
% [IW,B,LW,TF,TYPE] = elmtrain(P,T,20,'sig',0)
% Y = elmpredict(P,IW,B,LW,TF,TYPE)
% Classification:
% [IW,B,LW,TF,TYPE] = elmtrain(P,T,20,'sig',1)
% Y = elmpredict(P,IW,B,LW,TF,TYPE)
% See also ELMPREDICT
% Yu Lei, 11-7-2010
% Copyright www.matlabsky.com
% $Revision:1.0 $
if nargin < 2
    error('ELM:Arguments','Not enough input arguments.');
end
if nargin < 3
    N = size(P,2);
end
if nargin < 4
    TF = 'sig';
end
if nargin < 5
    TYPE = 0;
end
if size(P,2) ~= size(T,2)
    error('ELM:Arguments','The columns of P and T must be the same.');
end
[R,Q] = size(P);
if TYPE == 1
    T = ind2vec(T); % convert class labels to one-of-K target vectors
end
[S,Q] = size(T);
% Randomly generate the input weight matrix
IW = rand(N,R) * 2 - 1;
% Randomly generate the bias matrix
B = rand(N,1);
BiasMatrix = repmat(B,1,Q);
% Calculate the hidden-layer output matrix H
tempH = IW * P + BiasMatrix;
switch TF
    case 'sig'
        H = 1 ./ (1 + exp(-tempH));
    case 'sin'
        H = sin(tempH);
    case 'hardlim'
        H = hardlim(tempH);
end
% Calculate the output weight matrix (least squares via the pseudo-inverse)
LW = pinv(H') * T';
end
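%% elmpredict (not included in this excerpt)
% The script above calls elmpredict, which is missing from this excerpt. The
% following is a minimal sketch consistent with elmtrain's outputs; it is an
% assumed reconstruction, not necessarily the original matlabsky implementation.
function Y = elmpredict(P,IW,B,LW,TF,TYPE)
% ELMPREDICT Simulate an Extreme Learning Machine on new inputs
% P - input matrix (R*Q); IW,B,LW,TF,TYPE - outputs of ELMTRAIN
Q = size(P,2);
BiasMatrix = repmat(B,1,Q); % replicate the bias for every sample
tempH = IW * P + BiasMatrix; % hidden-layer pre-activation
switch TF
    case 'sig'
        H = 1 ./ (1 + exp(-tempH));
    case 'sin'
        H = sin(tempH);
    case 'hardlim'
        H = hardlim(tempH);
end
Y = (H' * LW)'; % S*Q network output
if TYPE == 1
    % Classification: pick the winning class for each sample
    temp_Y = zeros(size(Y));
    for i = 1:size(Y,2)
        [~,index] = max(Y(:,i));
        temp_Y(index,i) = 1;
    end
    Y = vec2ind(temp_Y); % back to integer class labels
end
end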