/*
 * destructive_simple-cnn_8x8-2
 *
 * Build: gcc -g destructive_simple-cnn_8x8-2.c -o destructive_simple-cnn_8x8-2 -lm
 *
 * A minimal CNN (one 3x3 convolution layer with 2 filters, 2x2 average
 * pooling, one fully connected hidden layer) that classifies 8x8 binary
 * character images as A/B/C/D.  After loading a pre-trained weight set,
 * the program randomly destroys neurons and/or synapses with a given
 * probability and measures how far the damaged network's outputs drift
 * from the intact network's outputs.
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/*
 * Network dimensions.  Kept as enum constants so they are valid constant
 * expressions for file-scope array bounds in both C and C++.
 */
enum {
    _Image_Size = 8,          /* input image is 8x8 pixels               */
    _Filter_Size = 3,         /* convolution filter is 3x3               */
    _Filter_Number = 2,       /* number of convolution filters           */
    _Pool_Outsize = 3,        /* pooling output is 3x3 per filter        */
    _Input_Node_Number = 18,  /* fully connected inputs (2 * 3 * 3)      */
    _hidden_node_number = 6,  /* hidden-layer cells                      */
    _output_node_number = 4,  /* output-layer cells (one per class)      */
    _alpha = 1,               /* learning rate                           */
    _max_data_number = 100,   /* maximum number of training samples      */
    _seed = 199               /* fixed PRNG seed for reproducibility     */
};
static const double _limit = 0.001;  /* error threshold used when training */

/* Teacher data: one-hot class labels (A, B, C, D) for the 8 images below. */
double learning_output[][_output_node_number] = {
    {1,0,0,0},  /* "A" */
    {0,1,0,0},  /* "B" */
    {0,0,1,0},  /* "C" */
    {0,0,0,1},  /* "D" */
    {1,0,0,0},  /* "A" */
    {0,1,0,0},  /* "B" */
    {0,0,1,0},  /* "C" */
    {0,0,0,1}   /* "D" */
};

/* Training images: 8x8 binary bitmaps, two variants of each letter. */
double learning_input[][_Image_Size][_Image_Size] = {
    { /* "A" (variant 1) */
        {0,0,0,1,1,0,0,0},
        {0,0,0,1,1,0,0,0},
        {0,0,1,1,1,1,0,0},
        {0,0,1,0,1,0,0,0},
        {0,0,1,0,1,0,0,0},
        {0,1,1,1,1,1,1,0},
        {0,1,0,0,0,0,1,0},
        {0,1,0,0,0,0,1,0}
    },
    { /* "B" (variant 1) */
        {0,1,1,1,1,0,0,0},
        {0,1,0,0,0,1,0,0},
        {0,1,0,0,0,1,0,0},
        {0,1,1,1,1,1,0,0},
        {0,1,0,0,0,1,0,0},
        {0,1,0,0,0,0,1,0},
        {0,1,0,0,0,1,0,0},
        {0,1,1,1,1,0,0,0}
    },
    { /* "C" (variant 1) */
        {0,0,0,1,1,1,0,0},
        {0,0,1,0,0,0,1,0},
        {0,1,0,0,0,0,0,0},
        {0,1,0,0,0,0,0,0},
        {0,1,0,0,0,0,0,0},
        {0,1,0,0,0,0,0,0},
        {0,0,1,0,0,0,1,0},
        {0,0,1,1,1,1,0,0}
    },
    { /* "D" (variant 1) */
        {0,1,1,1,1,1,0,0},
        {0,1,0,0,0,1,1,0},
        {0,1,0,0,0,0,1,0},
        {0,1,0,0,0,0,1,0},
        {0,1,0,0,0,0,1,0},
        {0,1,0,0,0,0,1,0},
        {0,1,0,0,0,1,1,1},
        {0,1,1,1,1,1,0,0}
    },
    { /* "A" (variant 2) */
        {0,0,0,0,0,1,0,0},
        {0,0,0,0,1,1,0,0},
        {0,0,0,1,1,0,1,0},
        {0,0,0,1,0,0,1,0},
        {0,0,1,0,0,0,1,0},
        {0,0,1,1,1,1,1,0},
        {0,1,0,0,0,0,1,0},
        {0,0,0,0,0,0,1,0}
    },
    { /* "B" (variant 2) */
        {0,0,0,1,1,1,1,0},
        {0,0,0,1,0,0,0,1},
        {0,0,0,1,0,0,0,1},
        {0,0,0,1,1,1,1,1},
        {0,0,0,1,0,0,0,1},
        {0,0,0,1,0,0,0,1},
        {0,0,0,1,1,0,0,1},
        {0,0,0,1,1,1,1,0}
    },
    { /* "C" (variant 2) */
        {0,1,1,1,1,0,0,0},
        {1,0,0,0,0,0,0,0},
        {1,0,0,0,0,0,0,0},
        {1,0,0,0,0,0,0,0},
        {1,0,0,0,0,0,0,0},
        {0,1,0,0,1,0,0,0},
        {0,0,1,1,0,0,0,0},
        {0,0,0,0,0,0,0,0}
    },
    { /* "D" (variant 2) */
        {0,0,0,1,1,0,0,0},
        {0,0,1,0,0,1,0,0},
        {0,0,1,0,0,0,1,0},
        {0,0,1,0,0,0,1,0},
        {0,1,0,0,0,0,1,0},
        {0,1,0,0,0,1,1,0},
        {0,1,1,1,1,1,0,0},
        {0,0,0,0,0,0,0,0}
    }
};

/*
 * Weights obtained from a previous back-propagation training run with
 * seed 199 (the training loop itself has been removed from this tool;
 * output_layer_learning()/hidden_layer_learning() are kept for reference).
 * main() overwrites the random initialisation with these values.
 */
static const double trained_hidden[_hidden_node_number][_Input_Node_Number + 1] = {
    { -0.894214, -0.589168,  1.352162,  0.912494,  1.946109,  0.193768,
      -0.405493, -0.085132, -0.951950, -1.206483,  1.390184, -0.017131,
      -0.265427,  0.262112, -1.105641,  1.401875, -0.137324, -0.094189,
      -0.562396 },
    {  0.206187,  0.455349,  0.837734,  0.146251, -0.354559,  2.108063,
      -0.893268, -2.477319,  2.014512,  0.572881, -1.649094,  0.646904,
       1.472824,  0.530051,  2.254208, -3.473133, -0.938524, -0.009267,
      -1.309317 },
    {  0.227572,  1.232494, -2.537341,  0.365881, -2.846355, -2.043243,
      -0.261722,  0.520838,  1.012174,  0.024056,  0.495044,  1.327191,
      -0.493553, -1.601224,  1.186214,  0.538923, -0.579770,  1.887562,
       1.349836 },
    { -1.136046, -0.663991, -1.115442, -0.938678,  0.707814,  0.111197,
       0.873782,  0.674853, -1.298739, -0.686390,  0.577540, -1.054922,
       1.409316, -1.802968, -0.237759,  2.128390,  1.408057,  1.964002,
      -0.134186 },
    {  0.852953,  0.063201, -0.889464,  2.378443,  0.142812, -0.515105,
      -1.532579,  0.196552, -1.700421, -0.140149, -0.233038,  0.228411,
      -1.001625, -0.353124, -3.399658, -0.454278, -1.002340, -1.961674,
       2.020139 },
    { -0.566715, -0.783293, -1.035309, -1.362802, -0.964330, -0.461190,
      -0.874461, -0.007478, -1.040060,  0.168544,  0.503025, -0.290725,
       0.086780, -0.325586,  0.019807,  0.063060,  0.143956,  0.289088,
      -0.132838 }
};
static const double trained_output[_output_node_number][_hidden_node_number + 1] = {
    { -2.635596,  3.664741,  1.614275, -5.449503,  1.512428, -0.180874,  1.752045 },
    { -1.097646, -4.989247,  6.900954,  3.950271, -1.800523, -1.847274,  2.855253 },
    {  0.860865,  3.511676, -2.354413,  1.022243, -6.457044,  0.358115,  0.421507 },
    {  3.231603, -4.047048, -5.750798, -1.363708,  4.376589, -1.055001,  0.505825 }
};

/* --- Random helpers --- */
double ddrand(void);  /* uniform random value in [0,1) */
double drand(void);   /* uniform random value in [-1,1) */

/* --- Convolution stage --- */
void init_filter(double filter[_Filter_Number][_Filter_Size][_Filter_Size]);
void convolution(double filter[_Filter_Size][_Filter_Size],
                 double e[][_Image_Size],
                 double convolution_output[][_Image_Size]);
double convolution_calculation(double filter[][_Filter_Size],
                               double e[][_Image_Size], int i, int j);
void pooling(double convolution_output[][_Image_Size],
             double pooling_output[][_Pool_Outsize]);
double pooling_calculation(double convolution_output[][_Image_Size], int x, int y);

/* --- Fully connected stage --- */
void init_weight_hidden_layer(double weight_hidden[_hidden_node_number][_Input_Node_Number + 1]);
void init_weight_output_layer(double weight_output[_output_node_number][_hidden_node_number + 1]);
double forward(double weight_hidden[_hidden_node_number][_Input_Node_Number + 1],
               double weight_output[_hidden_node_number + 1],
               double hidden_input[], double e[]);
double destructive_forward(double weight_hidden[_hidden_node_number][_Input_Node_Number + 1],
                           double weight_output[_hidden_node_number + 1],
                           double hidden_input[], double e[],
                           double node_ratio, double synapse_ratio);
void output_layer_learning(double weight_output[_hidden_node_number + 1],
                           double hidden_input[], double e[], double o, int k);
void hidden_layer_learning(double weight_hidden[_hidden_node_number][_Input_Node_Number + 1],
                           double weight_output[_hidden_node_number + 1],
                           double hidden_input[], double e[], double output, int k);
void print_weight(double weight_hidden[_hidden_node_number][_Input_Node_Number + 1],
                  double weight_output[_output_node_number][_hidden_node_number + 1]);
double sigmoid_function(double u);

/*
 * main() - preprocess the 8 sample images through convolution + pooling,
 * load the pre-trained weights, then for each sample sweep the synapse
 * destruction ratio from 0.0 to 1.0 and report the mean absolute output
 * difference between the intact and the damaged network.
 */
int main(void)
{
    double filter[_Filter_Number][_Filter_Size][_Filter_Size];       /* convolution filters   */
    double convolution_output[_Image_Size][_Image_Size];             /* convolution output    */
    double pooling_output[_Pool_Outsize][_Pool_Outsize];             /* pooling output        */
    double weight_hidden[_hidden_node_number][_Input_Node_Number + 1]; /* hidden-layer weights */
    double weight_output[_output_node_number][_hidden_node_number + 1]; /* output-layer weights */
    double e[_max_data_number][_Input_Node_Number + _output_node_number]; /* data set (features + teacher) */
    double hidden_input[_hidden_node_number + 1];                    /* hidden-layer outputs  */
    int n_of_e;                                                      /* number of samples     */

    srand(_seed);

    /*
     * Random initialisation is kept even though the weights are
     * overwritten below: it consumes the same rand() sequence as the
     * original training run, so ddrand() draws stay reproducible.
     */
    init_filter(filter);
    init_weight_hidden_layer(weight_hidden);
    init_weight_output_layer(weight_output);

    n_of_e = 8;

    /* Convolution + pooling: build the fully-connected input vectors. */
    for (int i = 0; i < n_of_e; i++) {              /* per sample */
        for (int j = 0; j < _Filter_Number; j++) {  /* per filter */
            convolution(filter[j], learning_input[i], convolution_output);
            pooling(convolution_output, pooling_output);
            /* Copy the pooled 3x3 map into this filter's slot of e[i]. */
            for (int m = 0; m < _Pool_Outsize; m++) {
                for (int n = 0; n < _Pool_Outsize; n++) {
                    e[i][j * _Pool_Outsize * _Pool_Outsize + _Pool_Outsize * m + n] =
                        pooling_output[m][n];
                }
            }
            /* Append the teacher vector after the feature values. */
            for (int m = 0; m < _output_node_number; m++) {
                e[i][_Pool_Outsize * _Pool_Outsize * _Filter_Number + m] =
                    learning_output[i][m];
            }
        }
    }

    /* Replace the random weights with the pre-trained ones. */
    for (int i = 0; i < _hidden_node_number; i++) {
        for (int j = 0; j < _Input_Node_Number + 1; j++) {
            weight_hidden[i][j] = trained_hidden[i][j];
        }
    }
    for (int i = 0; i < _output_node_number; i++) {
        for (int j = 0; j < _hidden_node_number + 1; j++) {
            weight_output[i][j] = trained_output[i][j];
        }
    }

    /* Destruction sweep: for each sample, print the teacher vector and
     * then "synapse ratio, mean |intact - damaged|" for ratios 0.0..1.0. */
    for (int i = 0; i < n_of_e; i++) {
        printf("\n");
        for (int j = _Input_Node_Number; j < _Input_Node_Number + _output_node_number; j++) {
            printf("%lf,", e[i][j]);  /* teacher data */
        }
        printf("\n");

        for (double s_ratio = 0.0; s_ratio < 1.01; s_ratio += 0.1) {
            /* FIX: reset the accumulator per ratio; the original declared
             * it outside this loop, so each average carried residue from
             * the previous ratios. */
            double diff = 0.0;
            for (int trial = 0; trial < 100; trial++) {
                for (int j = 0; j < _output_node_number; j++) {
                    double intact  = forward(weight_hidden, weight_output[j],
                                             hidden_input, e[i]);
                    double damaged = destructive_forward(weight_hidden, weight_output[j],
                                                         hidden_input, e[i],
                                                         0.00, s_ratio);
                    diff += fabs(intact - damaged);
                }
            }
            diff /= (100 * _output_node_number);
            printf("%f,%f\n", s_ratio, diff);
        }
    }
    return 0;
}

/**********************************************************
 * ddrand() - uniform random number in [0,1).
 * Re-draws on the (rare) exact 1.0 so the result is < 1.
 **********************************************************/
double ddrand(void)
{
    double rndno;
    while ((rndno = (double)rand() / RAND_MAX) == 1.0)
        ;
    return rndno;
}

/**********************************************************
 * drand() - uniform random number in [-1,1).
 **********************************************************/
double drand(void)
{
    double rndno;
    while ((rndno = (double)rand() / RAND_MAX) == 1.0)
        ;
    return rndno * 2 - 1;  /* map [0,1) onto [-1,1) */
}

/**********************************************************
 * init_filter() - initialise every filter tap with a
 * random value in [-1,1).
 **********************************************************/
void init_filter(double filter[_Filter_Number][_Filter_Size][_Filter_Size])
{
    for (int i = 0; i < _Filter_Number; i++) {
        for (int j = 0; j < _Filter_Size; j++) {
            for (int k = 0; k < _Filter_Size; k++) {
                filter[i][j][k] = drand();
            }
        }
    }
}

/**********************************************************
 * init_weight_hidden_layer() - random initialisation of
 * the hidden-layer weights (incl. threshold column).
 **********************************************************/
void init_weight_hidden_layer(double weight_hidden[_hidden_node_number][_Input_Node_Number + 1])
{
    for (int i = 0; i < _hidden_node_number; i++) {
        for (int j = 0; j < _Input_Node_Number + 1; j++) {
            weight_hidden[i][j] = drand();
        }
    }
}

/**********************************************************
 * init_weight_output_layer() - random initialisation of
 * the output-layer weights (incl. threshold column).
 **********************************************************/
void init_weight_output_layer(double weight_output[_output_node_number][_hidden_node_number + 1])
{
    for (int i = 0; i < _output_node_number; i++) {
        for (int j = 0; j < _hidden_node_number + 1; j++) {
            weight_output[i][j] = drand();
        }
    }
}

/**********************************************************
 * convolution() - apply one filter to the interior of the
 * image (border pixels, where the filter would overhang,
 * are skipped and left unwritten).
 **********************************************************/
void convolution(double filter[][_Filter_Size], double e[][_Image_Size],
                 double convolution_output[][_Image_Size])
{
    int startpoint = _Filter_Size / 2;  /* margin excluded from the sweep */
    for (int i = startpoint; i < _Image_Size - startpoint; i++) {
        for (int j = startpoint; j < _Image_Size - startpoint; j++) {
            convolution_output[i][j] = convolution_calculation(filter, e, i, j);
        }
    }
}

/**********************************************************
 * convolution_calculation() - filter response at (i,j):
 * sum of the elementwise product of the filter and the
 * image patch centred on (i,j).
 **********************************************************/
double convolution_calculation(double filter[][_Filter_Size],
                               double e[][_Image_Size], int i, int j)
{
    double sum = 0;
    for (int m = 0; m < _Filter_Size; m++) {
        for (int n = 0; n < _Filter_Size; n++) {
            sum += e[i - _Filter_Size / 2 + m][j - _Filter_Size / 2 + n] * filter[m][n];
        }
    }
    return sum;
}

/**********************************************************
 * pooling() - 2x2 average pooling with stride 2, sampled
 * at odd offsets (1,3,5) inside the convolved interior.
 **********************************************************/
void pooling(double convolution_output[][_Image_Size],
             double pooling_output[][_Pool_Outsize])
{
    for (int i = 0; i < _Pool_Outsize; i++) {
        for (int j = 0; j < _Pool_Outsize; j++) {
            pooling_output[i][j] =
                pooling_calculation(convolution_output, i * 2 + 1, j * 2 + 1);
        }
    }
}

/**********************************************************
 * pooling_calculation() - mean of the 2x2 patch whose
 * top-left corner is (x,y).
 **********************************************************/
double pooling_calculation(double convolution_output[][_Image_Size], int x, int y)
{
    double ave = 0.0;
    for (int m = x; m <= x + 1; m++) {
        for (int n = y; n <= y + 1; n++) {
            ave += convolution_output[m][n];
        }
    }
    return ave / 4.0;
}

/**********************************************************
 * forward() - forward pass for ONE output cell.
 * Fills hidden_input[] as a side effect and returns the
 * sigmoid output of the cell whose weight row is
 * weight_output.
 **********************************************************/
double forward(double weight_hidden[_hidden_node_number][_Input_Node_Number + 1],
               double weight_output[_hidden_node_number + 1],
               double hidden_input[], double e[])
{
    /* Hidden layer. */
    for (int i = 0; i < _hidden_node_number; i++) {
        double u = 0;  /* weighted sum */
        for (int j = 0; j < _Input_Node_Number; j++) {
            u += e[j] * weight_hidden[i][j];
        }
        u -= weight_hidden[i][_Input_Node_Number];  /* threshold */
        hidden_input[i] = sigmoid_function(u);
    }
    /* Output cell. */
    double output = 0.0;
    for (int i = 0; i < _hidden_node_number; i++) {
        output += hidden_input[i] * weight_output[i];
    }
    output -= weight_output[_hidden_node_number];  /* threshold */
    return sigmoid_function(output);
}

/**********************************************************
 * destructive_forward() - forward() variant that randomly
 * damages the network:
 *  (1) node_ratio    - probability that each hidden neuron
 *      is disabled (its output forced to 0);
 *  (2) synapse_ratio - probability that each individual
 *      weight, thresholds included, is treated as 0
 *      (the synapse is "cut").
 * The stored weights are never modified; damage is applied
 * to a local copy per draw.
 **********************************************************/
double destructive_forward(double weight_hidden[_hidden_node_number][_Input_Node_Number + 1],
                           double weight_output[_hidden_node_number + 1],
                           double hidden_input[], double e[],
                           double node_ratio, double synapse_ratio)
{
    double aa;  /* possibly-cut copy of the current weight */

    /* Hidden layer. */
    for (int i = 0; i < _hidden_node_number; i++) {
        double u = 0;
        for (int j = 0; j < _Input_Node_Number; j++) {
            aa = weight_hidden[i][j];
            if (ddrand() <= synapse_ratio) {
                aa = 0.0;  /* synapse cut */
            }
            u += e[j] * aa;
        }
        aa = weight_hidden[i][_Input_Node_Number];
        if (ddrand() <= synapse_ratio) {
            aa = 0.0;
        }
        u -= aa;  /* threshold */
        /* Neuron destruction: with probability node_ratio the cell is dead. */
        if (ddrand() <= node_ratio) {
            hidden_input[i] = 0.0;
        } else {
            hidden_input[i] = sigmoid_function(u);
        }
    }

    /* Output cell. */
    double output = 0.0;
    for (int i = 0; i < _hidden_node_number; i++) {
        aa = weight_output[i];
        if (ddrand() <= synapse_ratio) {
            aa = 0.0;
        }
        output += hidden_input[i] * aa;
    }
    aa = weight_output[_hidden_node_number];
    if (ddrand() <= synapse_ratio) {
        aa = 0.0;
    }
    output -= aa;  /* threshold */
    return sigmoid_function(output);
}

/**********************************************************
 * output_layer_learning() - delta-rule update of output
 * cell k's weights (kept for reference; the training loop
 * that called it has been removed from this tool).
 **********************************************************/
void output_layer_learning(double weight_output[_hidden_node_number + 1],
                           double hidden_input[], double e[], double o, int k)
{
    double d = (e[_Input_Node_Number + k] - o) * o * (1 - o);  /* output error term */
    for (int i = 0; i < _hidden_node_number; i++) {
        weight_output[i] += _alpha * hidden_input[i] * d;
    }
    weight_output[_hidden_node_number] += _alpha * (-1.0) * d;  /* threshold */
}

/**********************************************************
 * hidden_layer_learning() - back-propagated update of the
 * hidden-layer weights for output cell k (kept for
 * reference, see output_layer_learning()).
 **********************************************************/
void hidden_layer_learning(double weight_hidden[_hidden_node_number][_Input_Node_Number + 1],
                           double weight_output[_hidden_node_number + 1],
                           double hidden_input[], double e[], double output, int k)
{
    for (int j = 0; j < _hidden_node_number; j++) {  /* per hidden cell j */
        double dj = hidden_input[j] * (1 - hidden_input[j]) * weight_output[j]
                  * (e[_Input_Node_Number + k] - output) * output * (1 - output);
        for (int i = 0; i < _Input_Node_Number; i++) {
            weight_hidden[j][i] += _alpha * e[i] * dj;
        }
        weight_hidden[j][_Input_Node_Number] += _alpha * (-1.0) * dj;  /* threshold */
    }
}

/**********************************************************
 * print_weight() - dump all weights as C assignment
 * statements (the pre-trained tables above were generated
 * with this).
 **********************************************************/
void print_weight(double weight_hidden[_hidden_node_number][_Input_Node_Number + 1],
                  double weight_output[_output_node_number][_hidden_node_number + 1])
{
    for (int i = 0; i < _hidden_node_number; i++) {
        for (int j = 0; j < _Input_Node_Number + 1; j++) {
            printf("weight_hidden[%d][%d] = %lf;\n", i, j, weight_hidden[i][j]);
        }
    }
    printf("\n");
    for (int i = 0; i < _output_node_number; i++) {
        for (int j = 0; j < _hidden_node_number + 1; j++) {
            printf("weight_output[%d][%d] = %lf;\n", i, j, weight_output[i][j]);
        }
    }
    printf("\n");
}

/**********************************************************
 * sigmoid_function() - standard logistic sigmoid.
 **********************************************************/
double sigmoid_function(double u)
{
    return 1.0 / (1.0 + exp(-u));
}