54   memset( net, 0, sizeof( ANN ) );
56   if ( !(fh = fopen( FileName, "r" ) ) ){
57           fprintf( stderr, "Input file doesn't exist.\n" );
62   fscanf( fh, "%d", &(net->N_input) );
64           fprintf( stderr, "Input number must be greater than zero.\n" );
70           fprintf( stderr, "Output number must be greater than zero.\n" );
74   fscanf( fh, "%d", &(net->N_layer) );
76           fprintf( stderr, "Hidden layer number must be not negative.\n" );
80   fscanf( fh, "%d", &(net->r) );
82           fprintf( stderr, "Timestep delay number must be not negative.\n" );
87   if( !(net->N_neuron = (int *)malloc( (net->N_layer+2) * sizeof(int) ) ) ){
92   for( i = 0; i < net->N_layer+1; i++ ){
93           fscanf( fh, "%d", &(net->N_neuron[i+1]) );
95           fprintf( stderr, "Neuron number at %d layer must be greater than zero.\n", i+1 );
102  fscanf( fh, "%d", &ActFnc );
119          fprintf( stderr, "Unknown activation function\n" );
132  fscanf( fh, "%le", &(net->eta) );
134          fprintf( stderr, "Learning rate must be not negative.\n" );
138  fscanf( fh, "%le", &(net->rho) );
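Taken together, lines 54-138 show the initialization routine reading the network description as a flat sequence of whitespace-separated values (input count, output count, hidden-layer count, time-delay count, one neuron count per layer, an activation-function selector, then the learning rate eta and momentum rho), rejecting out-of-range values with a message on stderr. A minimal sketch of that read-and-validate pattern follows; the read_bounded_int helper and the exact field order are inferred from the excerpts, not taken from the library.

    #include <stdio.h>

    /* Hypothetical helper (not in the library): read one integer and check a
     * lower bound, mirroring the fscanf/fprintf pairs quoted above. */
    static int read_bounded_int(FILE *fh, int min, const char *what, int *out)
    {
            if (fscanf(fh, "%d", out) != 1 || *out < min) {
                    fprintf(stderr, "%s must be at least %d.\n", what, min);
                    return -1;
            }
            return 0;
    }

    /* Assumed field order: input count, output count, hidden layers, time delays. */
    static int read_topology(FILE *fh, int *n_in, int *n_out, int *n_layer, int *r)
    {
            if (read_bounded_int(fh, 1, "Input number", n_in)
             || read_bounded_int(fh, 1, "Output number", n_out)
             || read_bounded_int(fh, 0, "Hidden layer number", n_layer)
             || read_bounded_int(fh, 0, "Timestep delay number", r)) {
                    return -1;
            }
            return 0;
    }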
144  for( i = 0; i < net->N_layer+1; i++ ){
202  for( i = 0; i < net->r; i++ ){
251  for( i = 0; i < net->N_layer+1; i++ ){
269  for( i = 0; i < net->N_layer+2; i++ ){
308  for( j = 0; j < net->N_layer+1; j++ ){
318  for( k = 0; k < net->r; k++ ){
320  for( j = 0; j < net->N_layer+1; j++ ){
346  fprintf( fh, "ARTIFICIAL NEURAL NETWORK\n" );
347  fprintf( fh, "Network topology\n" );
348  fprintf( fh, "-Input number: %d \n", net->N_input );
349  fprintf( fh, "-Output number: %d \n", net->N_output );
350  fprintf( fh, "-Hidden layers number: %d \n", net->N_layer );
352  for( i = 0; i < net->N_layer+1; i++ ){
353  fprintf( fh, "-Neurons number (layer number %d) : %d\n", i+1, net->N_neuron[i+1] );
355  fprintf( fh, "-Time delay number: %d \n", net->r );
357  fprintf( fh, "Training parameters\n" );
358  fprintf( fh, "-Learning rate: %e \n", net->eta );
359  fprintf( fh, "-Momentum term: %e \n", net->rho );
361  fprintf( fh, "Activation function parameters\n" );
364  fprintf( fh, "Synaptic weight\n" );
366  for( i = 0; i < net->N_layer+1; i++ ){
368  fprintf( fh, "-Layer number %d :\n", i+1 );
370  fprintf( fh, "-Visible layer :\n" );
376  fprintf( fh, "Input scale factors\n" );
381  fprintf( fh, "Output scale factors\n" );
388  fprintf( fh, " %d \n", net->N_input );
389  fprintf( fh, " %d \n", net->N_output );
390  fprintf( fh, " %d \n", net->N_layer );
391  fprintf( fh, " %d \n", net->r );
393  for( i = 0; i < net->N_layer+1; i++ ){
394  fprintf( fh, " %d ", net->N_neuron[i+1] );
396  fprintf( fh, "\n\n" );
401  fprintf( fh, " %e \n", net->eta );
402  fprintf( fh, " %e \n", net->rho );
405  for( i = 0; i < net->N_layer+1; i++ ){
410  fprintf( fh, "\n\n" );
433  for( i = 0; i < net->N_input; i++ ){
445  for( i = 0; i < net->N_layer+1; i++ ){
450  for( j = 0; j < net->N_neuron[i+1]; j++ ){
480  if( !( fh = fopen( FileName, "r" ) ) ){
485  fscanf( fh, "%d", &Nrow );
486  fscanf( fh, "%d", &Ncolumn );
508  if( !( fh = fopen( FileName, "w" ) ) ){
556  for( i = 0; i < net->N_layer+1; i++ ){
573  if( !( *vm = (matrix *)calloc( N_layer+1, sizeof(matrix) ) ) ){
577  for( i = 0; i < N_layer+1; i++ ){
578  if( matrix_init( &(*vm)[i], N_neuron[i], N_neuron[i+1] ) ){
596  for( i = 0; i < N_layer+2; i++ ){
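Lines 573-578 give the allocation pattern for a per-layer weight structure: an array of N_layer+1 matrix objects, where matrix i connects layer i (N_neuron[i] neurons) to layer i+1 (N_neuron[i+1] neurons). A condensed sketch of that pattern, assuming the matrix/matrix_init/matrix_destroy API listed at the end of this section, with the error handling reduced to an early return:

    #include <stdlib.h>
    /* plus the library's matrix header for matrix, matrix_init(), matrix_destroy() */

    static int init_weight_matrices(matrix **vm, const int *N_neuron, int N_layer)
    {
            int i;

            if (!(*vm = (matrix *)calloc(N_layer + 1, sizeof(matrix)))) {
                    return -1;                               /* out of memory */
            }
            for (i = 0; i < N_layer + 1; i++) {
                    /* matrix i maps layer i (N_neuron[i]) onto layer i+1 (N_neuron[i+1]) */
                    if (matrix_init(&(*vm)[i], N_neuron[i], N_neuron[i + 1])) {
                            while (i-- > 0) {
                                    matrix_destroy(&(*vm)[i]);
                            }
                            free(*vm);
                            *vm = NULL;
                            return -1;
                    }
            }
            return 0;
    }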
618  for( j = 0; j < net->N_input; j++ ){
622  for( k = 0; k < net->r; k++ ){
634  for( j = 0; j < net->N_neuron[i+1]; j++ ){
698  temp = (net->v[0].vec[i]);
716  for( k = 0; k < net->N_layer; k++ ){
732  temp = (net->v[0].vec[i]);
746  for( p = 0; p < (net->r-1); p++ ){
773  for( t = 0; t < N_sample; t++ ){
777  for( i = 0; i < net->N_input; i++ ){
800  for( i = 0; i < net->N_layer+1; i++ ){
838  for( i = 0; i < net->N_layer+1; i++ ){
845  for( i = 0; i < net->r; i++ ){
847  for( k = 0; k < net->N_layer+1; k++ ){
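Lines 773-847 outline the structure of a training epoch: an outer loop over the N_sample training samples, per-layer loops for the forward and weight-update passes, and a final pair of loops over the r time delays and the layers, presumably refreshing the delayed feedback states (see also the note on ANN_FEEDBACK_UPDATE in the symbol list below).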
866  fprintf( stderr, "Incompatible dimensions\n" );
872  for( i = 0; i < DES_OUTPUT->Nrow; i++ ){
873  for( j = 0; j < DES_OUTPUT->Ncolumn; j++ ){
874  *err += 0.5*( DES_OUTPUT->mat[i][j] - NN_OUTPUT->mat[i][j] )*( DES_OUTPUT->mat[i][j] - NN_OUTPUT->mat[i][j] );
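Line 874 accumulates the usual half sum of squared errors over every sample and output, err = 1/2 * sum_ij ( DES_OUTPUT[i][j] - NN_OUTPUT[i][j] )^2. A self-contained restatement of that loop, using only the Nrow, Ncolumn and mat fields visible above:

    /* Sketch of the accumulation at line 874: half the squared deviation between
     * desired and computed outputs, summed over every entry of the two matrices. */
    static double total_error(const matrix *des, const matrix *out)
    {
            double err = 0.0;
            unsigned i, j;

            for (i = 0; i < des->Nrow; i++) {
                    for (j = 0; j < des->Ncolumn; j++) {
                            double d = des->mat[i][j] - out->mat[i][j];
                            err += 0.5 * d * d;
                    }
            }
            return err;
    }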
887  for( i = 0; i < N_layer+1; i++ ){
888  for( j = 0; j < N_neuron[i]; j++ ){
889  for( k = 0; k < N_neuron[i+1]; k++ ){
891  ((*vm1)[i]).mat[j][k] = K*((*vm2)[i]).mat[j][k];
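In other words, lines 887-891 overwrite every entry of the weight matrices in vm1 with K times the corresponding entry in vm2; with K = 1 this reduces to a plain per-layer copy.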
903  case ANN_NO_MEMORY:    fprintf( stderr, "Memory error(@ %s)\n", string );
905  case ANN_MATRIX_ERROR: fprintf( stderr, "Error in using matrix library(@ %s)\n", string );
907  case ANN_NO_FILE:      fprintf( stderr, "Error in file opening(@ %s)\n", string );
909  case ANN_DATA_ERROR:   fprintf( stderr, "Error in data value(@ %s)\n", string );
911  case ANN_GEN_ERROR:    fprintf( stderr, "Error(@ %s)\n", string );
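Lines 903-911 are the body of a switch over the ann_res_t diagnostic codes. A complete sketch of that dispatch, under a hypothetical name so it is not confused with the library's ANN_error, with a silent default branch added as an assumption:

    #include <stdio.h>
    /* plus the library's ANN header for ann_res_t and the ANN_* error codes */

    static void report_ann_error(ann_res_t error, const char *string)
    {
            switch (error) {
            case ANN_NO_MEMORY:
                    fprintf(stderr, "Memory error(@ %s)\n", string);
                    break;
            case ANN_MATRIX_ERROR:
                    fprintf(stderr, "Error in using matrix library(@ %s)\n", string);
                    break;
            case ANN_NO_FILE:
                    fprintf(stderr, "Error in file opening(@ %s)\n", string);
                    break;
            case ANN_DATA_ERROR:
                    fprintf(stderr, "Error in data value(@ %s)\n", string);
                    break;
            case ANN_GEN_ERROR:
                    fprintf(stderr, "Error(@ %s)\n", string);
                    break;
            default:
                    break;          /* assumed: success codes print nothing */
            }
    }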
925  for( i = 0; i < net->N_input; i++ ){
928  for( j = 0; j < net->N_layer+1; j++ ){
933  for( k = 0; k < net->N_neuron[j+1]; k++ ){
mat_res_t vector_null(vector *VEC)
mat_res_t matrix_write(matrix *MAT, FILE *fh, unsigned flags)
int w_linear_write(void *priv, FILE *fh, unsigned flags)
ann_res_t ANN_DataWrite(matrix *MAT, char *FileName)
ann_res_t ANN_write(ANN *net, FILE *fh, unsigned flags)
ann_res_t ANN_sim(ANN *net, vector *input, vector *output, unsigned flags)
ann_res_t ANN_WeightUpdate(ANN *net, ANN_vector_matrix DW, double K)
int w_tanh_eval(void *priv, double in, int order, double *outp)
static void output(const LoadableElem *pEl, OutputHandler &OH)
ann_res_t ANN_vector_matrix_ass(ANN_vector_matrix *vm1, ANN_vector_matrix *vm2, int *N_neuron, int N_layer, double K)
ann_res_t ANN_DataRead(matrix *MAT, int *N_sample, char *FileName)
int error(const char *test, int value)
int w_linear_init(void **privp)
int w_tanh_read(void *priv, FILE *fh, unsigned flags)
mat_res_t vector_init(vector *VEC, unsigned dimension)
ann_res_t ANN_init(ANN *net, const char *FileName)
int w_tanh_write(void *priv, FILE *fh, unsigned flags)
ann_res_t ANN_vector_vector_init(ANN_vector_vector *vv, int *N_neuron, int N_layer)
ann_res_t ANN_vector_matrix_init(ANN_vector_matrix *vm, int *N_neuron, int N_layer)
mat_res_t matrixT_vector_prod(matrix *MAT, vector *VEC, vector *VEC_R)
ann_res_t ANN_dXdW(ANN *net, int I, int J, int N)
int w_tanh_init(void **privp)
mat_res_t matrix_null(matrix *MAT)
int w_linear_eval(void *priv, double in, int order, double *outp)
int w_tanh_destroy(void *priv)
ann_res_t ANN_TrainingEpoch(ANN *net, matrix *INPUT, matrix *DES_OUTPUT, matrix *NN_OUTPUT, int N_sample, ann_training_mode_t mode)
mat_res_t matrix_destroy(matrix *MAT)
ann_res_t ANN_reset(ANN *net)
#define ANN_FEEDBACK_UPDATE
static doublereal mat[5][5]
mat_res_t matrix_vector_prod(matrix *MAT, vector *VEC, vector *VEC_R)
ann_res_t ANN_TotalError(matrix *DES_OUTPUT, matrix *NN_OUTPUT, double *err)
mat_res_t matrix_read(matrix *MAT, FILE *fh, unsigned flags)
mat_res_t vector_destroy(vector *VEC)
mat_res_t matrix_sum(matrix *MAT1, matrix *MAT2, matrix *MAT_R, double K)
ann_res_t ANN_dEdW(ANN *net, vector *e)
int w_linear_read(void *priv, FILE *fh, unsigned flags)
double ANN_InternalFunctionDer(double v, ANN *net)
void ANN_error(ann_res_t error, char *string)
ann_res_t ANN_jacobian_matrix(ANN *net, matrix *jacobian)
mat_res_t matrix_init(matrix *MAT, unsigned Nrow, unsigned Ncolumn)
int w_linear_destroy(void *priv)
ANN_vector_vector Y_neuron
double ANN_InternalFunction(double v, ANN *net)
ann_res_t ANN_destroy(ANN *net)
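Pulling the pieces together, the signatures above suggest a typical driver: initialize the network from a description file, load input and desired-output matrices, run training epochs while tracking the total error, and write the results. The outline below is assembled only from those signatures; the file names, the epoch limit, the training-mode argument and the assumption that ANN_DataRead allocates its matrix are placeholders or guesses, not documented behaviour.

    #include <stdio.h>
    /* plus the library's ANN/matrix headers */

    static int train_outline(const char *net_file, const char *in_file,
                             const char *des_file, ann_training_mode_t mode,
                             int max_epoch)
    {
            ANN net;
            matrix INPUT, DES_OUTPUT, NN_OUTPUT;
            int N_sample = 0, epoch;
            double err;

            /* ann_res_t return values are handed to ANN_error() for reporting;
             * the excerpts do not show the success code, so no branching here. */
            ANN_error(ANN_init(&net, net_file), "ANN_init");
            ANN_error(ANN_DataRead(&INPUT, &N_sample, (char *)in_file), "ANN_DataRead");
            ANN_error(ANN_DataRead(&DES_OUTPUT, &N_sample, (char *)des_file), "ANN_DataRead");

            matrix_init(&NN_OUTPUT, N_sample, net.N_output);   /* one row per sample (assumed) */

            for (epoch = 0; epoch < max_epoch; epoch++) {
                    ANN_error(ANN_TrainingEpoch(&net, &INPUT, &DES_OUTPUT, &NN_OUTPUT,
                                                N_sample, mode), "ANN_TrainingEpoch");
                    err = 0.0;
                    ANN_TotalError(&DES_OUTPUT, &NN_OUTPUT, &err);
                    fprintf(stderr, "epoch %d: total error %e\n", epoch, err);
            }

            ANN_DataWrite(&NN_OUTPUT, (char *)"nn_output.dat");   /* hypothetical file name */

            matrix_destroy(&INPUT);
            matrix_destroy(&DES_OUTPUT);
            matrix_destroy(&NN_OUTPUT);
            ANN_destroy(&net);
            return 0;
    }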