
BP Algorithm Code (Annotated BP Algorithm Code)

Posted by admin on 2022-12-19 13:03


This article discusses BP algorithm code, along with the corresponding topic of annotated BP algorithm code. We hope it is helpful; don't forget to bookmark this site.

Contents of this article:

1. How to write BP algorithm code in MATLAB for the figure below
2. Looking for C++ source code of the BP neural network algorithm
3. Looking for BP neural network algorithm code
4. Looking for MATLAB code for a BP-neural-network-based image restoration algorithm

How to write BP algorithm code in MATLAB for the figure below

% Read the training data
[f1, f2, f3, f4, class] = textread('trainData.txt', '%f%f%f%f%f', 150);

% Normalize the feature values
[input, minI, maxI] = premnmx([f1, f2, f3, f4]');

% Build the target matrix (one-hot coding of the class labels)
s = length(class);
output = zeros(s, 3);
for i = 1 : s
    output(i, class(i)) = 1;
end

% Create the network: 10 hidden units (logsig), 3 outputs (purelin)
net = newff(minmax(input), [10 3], {'logsig' 'purelin'}, 'traingdx');

% Set the training parameters
net.trainParam.show = 50;
net.trainParam.epochs = 500;
net.trainParam.goal = 0.01;
net.trainParam.lr = 0.01;

% Train the network
net = train(net, input, output');

% Read the test data
[t1, t2, t3, t4, c] = textread('testData.txt', '%f%f%f%f%f', 150);

% Normalize the test data with the training statistics
testInput = tramnmx([t1, t2, t3, t4]', minI, maxI);

% Simulate (forward pass)
Y = sim(net, testInput);

% Compute the recognition rate
[s1, s2] = size(Y);
hitNum = 0;
for i = 1 : s2
    [m, Index] = max(Y(:, i));
    if (Index == c(i))
        hitNum = hitNum + 1;
    end
end
sprintf('Recognition rate: %3.3f%%', 100 * hitNum / s2)

Looking at your data, you at least need class labels; it isn't clear which part is the input vector and which is the output vector.

Looking for C++ source code of the BP neural network algorithm

// AnnBP.cpp: implementation of the CAnnBP class.
//
//////////////////////////////////////////////////////////////////////

#include "StdAfx.h"
#include "AnnBP.h"
#include "math.h"

//////////////////////////////////////////////////////////////////////
// Construction/Destruction
//////////////////////////////////////////////////////////////////////

CAnnBP::CAnnBP()
{
    eta1 = 0.3;        // default learning rate
    momentum1 = 0.3;   // default momentum
}

CAnnBP::~CAnnBP()
{
}

/*** Return a random double in [0, 1] (BIGRND is presumably defined in AnnBP.h) ***/
double CAnnBP::drnd()
{
    return ((double) rand() / (double) BIGRND);
}

/*** Return a random double between -1.0 and 1.0 ***/
double CAnnBP::dpn1()
{
    return (double) (rand()) / (32767 / 2) - 1;
}

/*** Activation function, currently the sigmoid s(x) = 1/(1+e^-x).
     Note that s'(x) = s(x)*(1-s(x)), which is where the o*(1-o) and
     h*(1-h) factors in the delta computations below come from. ***/
double CAnnBP::squash(double x)
{
    return (1.0 / (1.0 + exp(-x)));
}

/*** Allocate a 1-D array of doubles ***/
double* CAnnBP::alloc_1d_dbl(int n)
{
    double *new1;
    new1 = (double *) malloc((unsigned) (n * sizeof(double)));
    if (new1 == NULL) {
        AfxMessageBox("ALLOC_1D_DBL: Couldn't allocate array of doubles\n");
        return (NULL);
    }
    return (new1);
}

/*** Allocate a 2-D array of doubles ***/
double** CAnnBP::alloc_2d_dbl(int m, int n)
{
    int i;
    double **new1;
    new1 = (double **) malloc((unsigned) (m * sizeof(double *)));
    if (new1 == NULL) {
        AfxMessageBox("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n");
        return (NULL);
    }
    for (i = 0; i < m; i++) {
        new1[i] = alloc_1d_dbl(n);
    }
    return (new1);
}

/*** Randomly initialize the weights ***/
void CAnnBP::bpnn_randomize_weights(double **w, int m, int n)
{
    int i, j;
    for (i = 0; i <= m; i++) {
        for (j = 0; j <= n; j++) {
            w[i][j] = dpn1();
        }
    }
}

/*** Zero-initialize the weights ***/
void CAnnBP::bpnn_zero_weights(double **w, int m, int n)
{
    int i, j;
    for (i = 0; i <= m; i++) {
        for (j = 0; j <= n; j++) {
            w[i][j] = 0.0;
        }
    }
}

/*** Set the random number seed ***/
void CAnnBP::bpnn_initialize(int seed)
{
    CString msg, s;
    msg = "Random number generator seed:";
    s.Format("%d", seed);
    AfxMessageBox(msg + s);
    srand(seed);
}

/*** Create a BP network ***/
BPNN* CAnnBP::bpnn_internal_create(int n_in, int n_hidden, int n_out)
{
    BPNN *newnet;
    newnet = (BPNN *) malloc(sizeof(BPNN));
    if (newnet == NULL) {
        printf("BPNN_CREATE: Couldn't allocate neural network\n");
        return (NULL);
    }
    newnet->input_n = n_in;
    newnet->hidden_n = n_hidden;
    newnet->output_n = n_out;
    // Unit 0 of each layer is reserved for the bias, hence the +1 sizes.
    newnet->input_units = alloc_1d_dbl(n_in + 1);
    newnet->hidden_units = alloc_1d_dbl(n_hidden + 1);
    newnet->output_units = alloc_1d_dbl(n_out + 1);
    newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1);
    newnet->output_delta = alloc_1d_dbl(n_out + 1);
    newnet->target = alloc_1d_dbl(n_out + 1);
    newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
    newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
    newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
    newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
    return (newnet);
}

/* Free the memory occupied by a BP network */
void CAnnBP::bpnn_free(BPNN *net)
{
    int n1, n2, i;
    n1 = net->input_n;
    n2 = net->hidden_n;
    free((char *) net->input_units);
    free((char *) net->hidden_units);
    free((char *) net->output_units);
    free((char *) net->hidden_delta);
    free((char *) net->output_delta);
    free((char *) net->target);
    for (i = 0; i <= n1; i++) {
        free((char *) net->input_weights[i]);
        free((char *) net->input_prev_weights[i]);
    }
    free((char *) net->input_weights);
    free((char *) net->input_prev_weights);
    for (i = 0; i <= n2; i++) {
        free((char *) net->hidden_weights[i]);
        free((char *) net->hidden_prev_weights[i]);
    }
    free((char *) net->hidden_weights);
    free((char *) net->hidden_prev_weights);
    free((char *) net);
}

/*** Create a BP network and initialize its weights ***/
BPNN* CAnnBP::bpnn_create(int n_in, int n_hidden, int n_out)
{
    BPNN *newnet;
    newnet = bpnn_internal_create(n_in, n_hidden, n_out);
#ifdef INITZERO
    bpnn_zero_weights(newnet->input_weights, n_in, n_hidden);
#else
    bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden);
#endif
    bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out);
    bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden);
    bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out);
    return (newnet);
}

/*** Propagate layer l1 forward to layer l2 through the weights conn ***/
void CAnnBP::bpnn_layerforward(double *l1, double *l2, double **conn, int n1, int n2)
{
    double sum;
    int j, k;
    /*** Set the bias unit ***/
    l1[0] = 1.0;
    /*** For each neuron in the second layer ***/
    for (j = 1; j <= n2; j++) {
        /*** Compute the weighted sum of its inputs ***/
        sum = 0.0;
        for (k = 0; k <= n1; k++) {
            sum += conn[k][j] * l1[k];
        }
        l2[j] = squash(sum);
    }
}

/* Output-layer error */
void CAnnBP::bpnn_output_error(double *delta, double *target, double *output, int nj, double *err)
{
    int j;
    double o, t, errsum;
    errsum = 0.0;
    for (j = 1; j <= nj; j++) {
        o = output[j];
        t = target[j];
        delta[j] = o * (1.0 - o) * (t - o);   // sigmoid derivative times error
        errsum += ABS(delta[j]);
    }
    *err = errsum;
}

/* Hidden-layer error */
void CAnnBP::bpnn_hidden_error(double *delta_h, int nh, double *delta_o, int no, double **who, double *hidden, double *err)
{
    int j, k;
    double h, sum, errsum;
    errsum = 0.0;
    for (j = 1; j <= nh; j++) {
        h = hidden[j];
        sum = 0.0;
        for (k = 1; k <= no; k++) {
            sum += delta_o[k] * who[j][k];   // back-propagate the output deltas
        }
        delta_h[j] = h * (1.0 - h) * sum;
        errsum += ABS(delta_h[j]);
    }
    *err = errsum;
}

/* Adjust the weights: delta_w(t) = eta * delta_j * y_k + momentum * delta_w(t-1) */
void CAnnBP::bpnn_adjust_weights(double *delta, int ndelta, double *ly, int nly, double **w, double **oldw, double eta, double momentum)
{
    double new_dw;
    int k, j;
    ly[0] = 1.0;
    for (j = 1; j <= ndelta; j++) {
        for (k = 0; k <= nly; k++) {
            new_dw = ((eta * delta[j] * ly[k]) + (momentum * oldw[k][j]));
            w[k][j] += new_dw;
            oldw[k][j] = new_dw;   // remember the step for the momentum term
        }
    }
}

/* Run the forward pass */
void CAnnBP::bpnn_feedforward(BPNN *net)
{
    int in, hid, out;
    in = net->input_n;
    hid = net->hidden_n;
    out = net->output_n;
    /*** Feed forward input activations. ***/
    bpnn_layerforward(net->input_units, net->hidden_units,
                      net->input_weights, in, hid);
    bpnn_layerforward(net->hidden_units, net->output_units,
                      net->hidden_weights, hid, out);
}

/* Train the BP network on the currently loaded input/target pair */
void CAnnBP::bpnn_train(BPNN *net, double eta, double momentum, double *eo, double *eh)
{
    int in, hid, out;
    double out_err, hid_err;
    in = net->input_n;
    hid = net->hidden_n;
    out = net->output_n;
    /*** Forward propagation of the input activations ***/
    bpnn_layerforward(net->input_units, net->hidden_units,
                      net->input_weights, in, hid);
    bpnn_layerforward(net->hidden_units, net->output_units,
                      net->hidden_weights, hid, out);
    /*** Compute the output-layer and hidden-layer errors ***/
    bpnn_output_error(net->output_delta, net->target, net->output_units,
                      out, &out_err);
    bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out,
                      net->hidden_weights, net->hidden_units, &hid_err);
    *eo = out_err;
    *eh = hid_err;
    /*** Adjust the hidden->output and input->hidden weights ***/
    bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid,
                        net->hidden_weights, net->hidden_prev_weights, eta, momentum);
    bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in,
                        net->input_weights, net->input_prev_weights, eta, momentum);
}

/* Save the BP network to a file */
void CAnnBP::bpnn_save(BPNN *net, char *filename)
{
    CFile file;
    char *mem;
    int n1, n2, n3, i, j, memcnt;
    double dvalue, **w;
    n1 = net->input_n; n2 = net->hidden_n; n3 = net->output_n;
    printf("Saving %dx%dx%d network to '%s'\n", n1, n2, n3, filename);
    try
    {
        file.Open(filename, CFile::modeWrite | CFile::modeCreate | CFile::modeNoTruncate);
    }
    catch (CFileException* e)
    {
        e->ReportError();
        e->Delete();
    }
    // Layer sizes first
    file.Write(&n1, sizeof(int));
    file.Write(&n2, sizeof(int));
    file.Write(&n3, sizeof(int));
    // Input->hidden weights
    memcnt = 0;
    w = net->input_weights;
    mem = (char *) malloc((unsigned) ((n1 + 1) * (n2 + 1) * sizeof(double)));
    for (i = 0; i <= n1; i++) {
        for (j = 0; j <= n2; j++) {
            dvalue = w[i][j];
            fastcopy(&mem[memcnt], &dvalue, sizeof(double));
            memcnt += sizeof(double);
        }
    }
    file.Write(mem, sizeof(double) * (n1 + 1) * (n2 + 1));
    free(mem);
    // Hidden->output weights
    memcnt = 0;
    w = net->hidden_weights;
    mem = (char *) malloc((unsigned) ((n2 + 1) * (n3 + 1) * sizeof(double)));
    for (i = 0; i <= n2; i++) {
        for (j = 0; j <= n3; j++) {
            dvalue = w[i][j];
            fastcopy(&mem[memcnt], &dvalue, sizeof(double));
            memcnt += sizeof(double);
        }
    }
    file.Write(mem, (n2 + 1) * (n3 + 1) * sizeof(double));
    free(mem);   // was commented out in the original, which leaked this buffer
    file.Close();
    return;
}

/* Read a BP network back from a file */
BPNN* CAnnBP::bpnn_read(char *filename)
{
    char *mem;
    BPNN *new1;
    int n1, n2, n3, i, j, memcnt;
    CFile file;
    try
    {
        file.Open(filename, CFile::modeRead | CFile::modeCreate | CFile::modeNoTruncate);
    }
    catch (CFileException* e)
    {
        e->ReportError();
        e->Delete();
    }
    // printf("Reading '%s'\n", filename); // fflush(stdout);
    file.Read(&n1, sizeof(int));
    file.Read(&n2, sizeof(int));
    file.Read(&n3, sizeof(int));
    new1 = bpnn_internal_create(n1, n2, n3);
    // printf("'%s' contains a %dx%dx%d network\n", filename, n1, n2, n3);
    // printf("Reading input weights..."); // fflush(stdout);
    memcnt = 0;
    mem = (char *) malloc(((n1 + 1) * (n2 + 1) * sizeof(double)));
    file.Read(mem, ((n1 + 1) * (n2 + 1)) * sizeof(double));
    for (i = 0; i <= n1; i++) {
        for (j = 0; j <= n2; j++) {
            fastcopy(&(new1->input_weights[i][j]), &mem[memcnt], sizeof(double));
            memcnt += sizeof(double);
        }
    }
    free(mem);
    // printf("Done\nReading hidden weights..."); // fflush(stdout);
    memcnt = 0;
    mem = (char *) malloc(((n2 + 1) * (n3 + 1) * sizeof(double)));
    file.Read(mem, (n2 + 1) * (n3 + 1) * sizeof(double));
    for (i = 0; i <= n2; i++) {
        for (j = 0; j <= n3; j++) {
            fastcopy(&(new1->hidden_weights[i][j]), &mem[memcnt], sizeof(double));
            memcnt += sizeof(double);
        }
    }
    free(mem);
    file.Close();
    printf("Done\n"); // fflush(stdout);
    // Clear the momentum history for the freshly loaded network
    bpnn_zero_weights(new1->input_prev_weights, n1, n2);
    bpnn_zero_weights(new1->hidden_prev_weights, n2, n3);
    return (new1);
}

void CAnnBP::CreateBP(int n_in, int n_hidden, int n_out)
{
    net = bpnn_create(n_in, n_hidden, n_out);
}

void CAnnBP::FreeBP()
{
    bpnn_free(net);
}

void CAnnBP::Train(double *input_unit, int input_num, double *target, int target_num, double *eo, double *eh)
{
    // Copy the sample into units 1..n (unit 0 is the bias), then run one training step
    for (int i = 1; i <= input_num; i++)
    {
        net->input_units[i] = input_unit[i - 1];
    }
    for (int j = 1; j <= target_num; j++)
    {
        net->target[j] = target[j - 1];
    }
    bpnn_train(net, eta1, momentum1, eo, eh);
}

void CAnnBP::Identify(double *input_unit, int input_num, double *target, int target_num)
{
    for (int i = 1; i <= input_num; i++)
    {
        net->input_units[i] = input_unit[i - 1];
    }
    bpnn_feedforward(net);
    // Copy the network outputs back out (forward pass only, no learning)
    for (int j = 1; j <= target_num; j++)
    {
        target[j - 1] = net->output_units[j];
    }
}

void CAnnBP::Save(char *filename)
{
    bpnn_save(net, filename);
}

void CAnnBP::Read(char *filename)
{
    net = bpnn_read(filename);
}

void CAnnBP::SetBParm(double eta, double momentum)
{
    eta1 = eta;
    momentum1 = momentum;
}

void CAnnBP::Initialize(int seed)
{
    bpnn_initialize(seed);
}
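
To make the interface above concrete, here is a minimal usage sketch (not part of the original answer) that trains one hypothetical sample and then runs recognition. It assumes the surrounding MFC project, in which AnnBP.h declares the BPNN struct, BIGRND, ABS and fastcopy; all sample values and the 4-10-3 layout are illustrative assumptions:

#include "AnnBP.h"

void DemoBP()
{
    CAnnBP bp;
    bp.Initialize(1);              // seed the random number generator
    bp.SetBParm(0.3, 0.3);         // learning rate and momentum
    bp.CreateBP(4, 10, 3);         // 4 inputs, 10 hidden units, 3 outputs (assumed sizes)

    double x[4] = {0.1, 0.2, 0.3, 0.4};   // hypothetical training sample
    double t[3] = {1.0, 0.0, 0.0};        // its one-hot target
    double eo, eh;
    for (int epoch = 0; epoch < 500; epoch++)
        bp.Train(x, 4, t, 3, &eo, &eh);   // eo/eh receive the output/hidden error sums

    double y[3];
    bp.Identify(x, 4, y, 3);       // forward pass only; y holds the outputs
    char fname[] = "bp.net";       // hypothetical file name
    bp.Save(fname);                // persist the trained weights
    bp.FreeBP();
}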

Looking for BP neural network algorithm code

The number of input nodes is 3x3x5 = 45 and the number of output nodes is 3x3+2 = 11; the number of hidden nodes is found by trial and error.

The MATLAB code for the BP neural network is in the attachment; just change the node counts and add normalization and denormalization steps.

BP stands for the Error Back Propagation (BP) algorithm. Its basic idea is that learning consists of two processes: forward propagation of the signal and backward propagation of the error. Because multilayer feedforward networks are usually trained with error backpropagation, such networks are often simply called BP networks.

1) Forward propagation: input sample → input layer → hidden layers (processing) → output layer

Note 1: if the actual output of the output layer does not match the expected output (the teacher signal), the algorithm moves to step 2), the error backpropagation phase.

2) Error backpropagation: output error (in some form) → hidden layers (layer by layer) → input layer

The main purpose is to propagate the output error backwards and apportion it to all units of each layer, obtaining an error signal for every unit, which is then used to correct each unit's weights (this is a weight-adjustment process).

Note 2: this weight-adjustment process is precisely the network's learning/training process (this is where "learning" comes from: adjusting the weights).
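
As a compact illustration of the two phases just described (and not part of the original answer), here is a self-contained C++ sketch of a single training step for a tiny 2-2-1 network. It reuses the sigmoid activation and the delta and weight-update formulas from the CAnnBP listing above; the network size, weights, inputs, teacher signal and learning rate are all arbitrary assumptions:

#include <cmath>
#include <cstdio>

static double squash(double x) { return 1.0 / (1.0 + std::exp(-x)); }

int main()
{
    // Illustrative 2-2-1 network; unit 0 of each layer is the bias (fixed at 1.0).
    double in[3]      = {1.0, 0.5, -0.3};                               // bias + two assumed inputs
    double w_ih[3][3] = {{0, 0.1, -0.2}, {0, 0.4, 0.3}, {0, -0.5, 0.2}}; // input->hidden weights
    double w_ho[3][2] = {{0, 0.3}, {0, -0.4}, {0, 0.6}};                 // hidden->output weights
    double hid[3], out[2];
    double target = 1.0, eta = 0.3;                                      // assumed teacher signal and rate

    // 1) Forward propagation: input -> hidden -> output
    hid[0] = 1.0;
    for (int j = 1; j <= 2; j++) {
        double s = 0.0;
        for (int k = 0; k <= 2; k++) s += w_ih[k][j] * in[k];
        hid[j] = squash(s);
    }
    double s = 0.0;
    for (int k = 0; k <= 2; k++) s += w_ho[k][1] * hid[k];
    out[1] = squash(s);

    // 2) Error backpropagation: output delta o*(1-o)*(t-o), then hidden deltas h*(1-h)*sum
    double d_out = out[1] * (1.0 - out[1]) * (target - out[1]);
    double d_hid[3] = {0, 0, 0};
    for (int j = 1; j <= 2; j++)
        d_hid[j] = hid[j] * (1.0 - hid[j]) * d_out * w_ho[j][1];

    // 3) Weight adjustment (plain gradient step; CAnnBP additionally adds a momentum term)
    for (int k = 0; k <= 2; k++)
        w_ho[k][1] += eta * d_out * hid[k];
    for (int j = 1; j <= 2; j++)
        for (int k = 0; k <= 2; k++)
            w_ih[k][j] += eta * d_hid[j] * in[k];

    std::printf("output before update: %f, output delta: %f\n", out[1], d_out);
    return 0;
}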

Looking for MATLAB code for a BP-neural-network-based image restoration algorithm

function Solar_SAE
tic;
n = 300;   % number of training images
m = 20;    % number of test images
train_x = [];
test_x = [];

% Read and flatten the training images
for i = 1:n
    %filename = strcat(['D:\Program Files\MATLAB\R2012a\work\DeepLearn\Solar_SAE\64_64_3train\' num2str(i,'%03d') '.bmp']);
    %filename = strcat(['E:\matlab\work\c0\TrainImage' num2str(i,'%03d') '.bmp']);
    filename = strcat(['E:\image restoration\3-(' num2str(i) ')-4.jpg']);
    b = imread(filename);
    %c = rgb2gray(b);
    c = b;
    [ImageRow, ImageCol] = size(c);
    c = reshape(c, [1, ImageRow*ImageCol]);
    train_x = [train_x; c];
end

% Read and flatten the test images
for i = 1:m
    %filename = strcat(['D:\Program Files\MATLAB\R2012a\work\DeepLearn\Solar_SAE\64_64_3test\' num2str(i,'%03d') '.bmp']);
    %filename = strcat(['E:\matlab\work\c0\TestImage' num2str(i+100,'%03d') '-1.bmp']);
    filename = strcat(['E:\image restoration\3-(' num2str(i+100) ').jpg']);
    b = imread(filename);
    %c = rgb2gray(b);
    c = b;
    [ImageRow, ImageCol] = size(c);
    c = reshape(c, [1, ImageRow*ImageCol]);
    test_x = [test_x; c];
end

% Scale the pixel values to [0, 1]
train_x = double(train_x)/255;
test_x = double(test_x)/255;
%train_y = double(train_y);
%test_y = double(test_y);

% Setup and train a stacked denoising autoencoder (SDAE)
rng(0);
%sae = saesetup([4096 500 200 50]);
%sae.ae{1}.activation_function = 'sigm';
%sae.ae{1}.learningRate = 0.5;
%sae.ae{1}.inputZeroMaskedFraction = 0.0;
%sae.ae{2}.activation_function = 'sigm';
%sae.ae{2}.learningRate = 0.5;
%sae.ae{2}.inputZeroMaskedFraction = 0.0;
%sae.ae{3}.activation_function = 'sigm';
%sae.ae{3}.learningRate = 0.5;
%sae.ae{3}.inputZeroMaskedFraction = 0.0;
%sae.ae{4}.activation_function = 'sigm';
%sae.ae{4}.learningRate = 0.5;
%sae.ae{4}.inputZeroMaskedFraction = 0.0;
%opts.numepochs = 10;
%opts.batchsize = 50;
%sae = saetrain(sae, train_x, opts);
%visualize(sae.ae{1}.W{1}(:,2:end)');

% Use the SDAE to initialize a FFNN (autoencoder topology: 4096 in, 4096 out)
nn = nnsetup([4096 1500 500 200 50 200 500 1500 4096]);
nn.activation_function = 'sigm';
nn.learningRate = 0.03;
nn.output = 'linear';   % output unit: 'sigm' (=logistic), 'softmax' or 'linear'

% Add pretrained weights
%nn.W{1} = sae.ae{1}.W{1};
%nn.W{2} = sae.ae{2}.W{1};
%nn.W{3} = sae.ae{3}.W{1};
%nn.W{4} = sae.ae{3}.W{2};
%nn.W{5} = sae.ae{2}.W{2};
%nn.W{6} = sae.ae{1}.W{2};
%nn.W{7} = sae.ae{2}.W{2};
%nn.W{8} = sae.ae{1}.W{2};

% Train the FFNN to reconstruct its own input
opts.numepochs = 30;
opts.batchsize = 150;
tx = test_x(14,:);
nn1 = nnff(nn, tx, tx);            % forward pass before training
ty1 = reshape(nn1.a{9}, 64, 64);
nn = nntrain(nn, train_x, train_x, opts);
toc;

tic;
nn2 = nnff(nn, tx, tx);            % forward pass after training
toc;

tic;
ty2 = reshape(nn2.a{9}, 64, 64);   % restored image
tx = reshape(tx, 64, 64);
tz = tx - ty2;
tz = im2bw(tz, 0.1);
%imshow(tx);
%figure, imshow(ty2);
%figure, imshow(tz);
ty = cat(2, tx, ty2, tz);          % input, restoration and difference side by side
montage(ty);

% Compare the restoration against the reference image
filename3 = strcat(['E:\image restoration\3.jpg']);
e = imread(filename3);
f = rgb2gray(e);
f = imresize(f, [64, 64]);
%imshow(ty2);
f = double(f)/255;
[PSNR, MSE] = psnr(ty2, f)
imwrite(ty2, 'E:\image restoration\bptest.jpg', 'jpg');
toc;
%visualize(ty);
%[er, bad] = nntest(nn, tx, tx);
%assert(er < 0.1, 'Too big error');

That wraps up our introduction to BP algorithm code. Thanks for taking the time to read this site's content. For more information about annotated BP algorithm code and BP algorithm code, don't forget to search this site.

Copyright notice: unless otherwise stated, articles on this site are original works of AH站长; when reposting, please credit the source and include a link to this article.

Permalink: http://ahzz.com.cn/post/8517.html

