MBDyn-1.7.3
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups
ann_tr.c
Go to the documentation of this file.
1 /* $Header: /var/cvs/mbdyn/mbdyn/mbdyn-1.0/utils/ann_tr.c,v 1.19 2017/01/12 15:10:27 masarati Exp $ */
2 /*
3  * MBDyn (C) is a multibody analysis code.
4  * http://www.mbdyn.org
5  *
6  * Copyright (C) 1996-2017
7  *
8  * Pierangelo Masarati <masarati@aero.polimi.it>
9  * Paolo Mantegazza <mantegazza@aero.polimi.it>
10  *
11  * Dipartimento di Ingegneria Aerospaziale - Politecnico di Milano
12  * via La Masa, 34 - 20156 Milano, Italy
13  * http://www.aero.polimi.it
14  *
15  * Changing this copyright notice is forbidden.
16  *
17  * This program is free software; you can redistribute it and/or modify
18  * it under the terms of the GNU General Public License as published by
19  * the Free Software Foundation (version 2 of the License).
20  *
21  *
22  * This program is distributed in the hope that it will be useful,
23  * but WITHOUT ANY WARRANTY; without even the implied warranty of
24  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25  * GNU General Public License for more details.
26  *
27  * You should have received a copy of the GNU General Public License
28  * along with this program; if not, write to the Free Software
29  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
30  */
31 
32 /*
33  * Copyright (C) 2008
34  *
35  * Mattia Mattaboni <mattaboni@aero.polimi.it>
36  */
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <math.h>
40 #include <getopt.h>
41 #include <string.h>
42 
43 #include "ann.h"
44 
45 /* TRAINING PARAMETERS DEFAULT */
47 static float TOLL = 0.;
48 static int MAXITER = 1000;
49 static int PRINTSTEP = 1;
50 static int SAVESTEP = 1;
51 static int verbose = 0;
52 static char *ANNfile = "data/ann.dat";
53 static char *INPUTfile = "data/Input.dat";
54 static char *DOUTPUTfile = "data/DOutput.dat";
55 static char *SAVEfile = "data/ann_tr.dat";
56 static char *NN_OUTPUTfile = "data/NNOutput.dat";
57 
/*
 * Print the command-line usage message to stdout and exit successfully.
 * (The extraction dropped original line 59, the declarator; restored from
 * the cross-reference "static void print_usage(void), ann_tr.c:59".)
 */
static void
print_usage(void)
{
	fprintf(stdout, "\nUSAGE OPTIONS:\n"
		"  -u, --usage\n"
		"	print usage\n"
		"  -v, --verbose\n"
		"	verbose output\n"
		"  -m, --mode\n"
		"	training mode: BATCH (default)\n"
		"		       SEQUENTIAL\n"
		"  -t, --toll\n"
		"	tolerance ( default 0. )\n"
		"  -i, --maxiter\n"
		"	max number of training iterations (default 1000)\n"
		"  -p, --print\n"
		"	printing output step (default 1)\n"
		"  -s, --save\n"
		"	saving ANN trained step (default 1)\n"
		"  -A, --ann_init\n"
		"	filename of initialized neural network (default data/ann.dat)\n"
		"  -I, --input\n"
		"	filename of network training input (default data/Input.dat)\n"
		"  -O, --des_output\n"
		"	filename of network desired output (default data/DOutput.dat)\n"
		"  -T, --ann_tr\n"
		"	filename where to save trained neural network (default data/ann_tr.dat)\n"
		"  -N, --nn_output\n"
		"	filename where to save trained neural network output (default data/NNOutput.dat)\n"
	);
	exit(0);
}
90 
91 int
92 main(int argc, char *argv[])
93 {
94  ANN net = { 0 };
95  matrix INPUT, DES_OUTPUT, NN_OUTPUT;
96  matrix INPUT2, DES_OUTPUT2;
97  vector ERR2;
98  int N_sample,i,j,ind;
99  int Niter, CNT;
100  double err1, err2;
101  ANN_vector_matrix W1, W2, dWold, dWnew;
102  FILE *fh;
103  int opt;
104  extern char *optarg;
105 
106 
107  /* 0. Training options */
108  do {
109 #ifdef HAVE_GETOPT_LONG
110  static struct option options[] = {
111  { "usage", 0, 0, 'u' },
112  { "verbose", 0, 0, 'v' },
113  { "mode", 1, 0, 'm' },
114  { "toll", 1, 0, 't' },
115  { "maxiter", 1, 0, 'i' },
116  { "print", 1, 0, 'p' },
117  { "save", 1, 0, 's' },
118  { "ann_init", 1, 0, 'A' },
119  { "input", 1, 0, 'I' },
120  { "des_output", 1, 0, 'O' },
121  { "ann_tr", 1, 0, 'T' },
122  { "nn_output", 1, 0, 'N' }
123  };
124  opt = getopt_long(argc, argv, "uvm:t:i:p:s:A:I:O:T:N:", options, NULL);
125 #else /* ! HAVE_GETOPT_LONG */
126  opt = getopt(argc, argv, "uvm:t:i:p:s:A:I:O:T:N:");
127 #endif /* ! HAVE_GETOPT_LONG */
128  switch (opt) {
129  case 'u': print_usage();
130  break;
131  case 'v': verbose = 1;
132  break;
133  case 'm':
134  if (strcasecmp(optarg, "batch") == 0) {
136  } else if (strcasecmp(optarg, "sequential") == 0) {
138  } else {
139  fprintf(stderr, "unknown training mode \"%s\" {batch|sequential}\n", optarg);
140  return 1;
141  }
142  break;
143  break;
144  case 't': TOLL = atof(optarg);
145  break;
146  case 'i': MAXITER = atoi(optarg);
147  break;
148  case 'p': PRINTSTEP = atoi(optarg);
149  break;
150  case 's': SAVESTEP = atoi(optarg);
151  break;
152  case 'A': ANNfile = optarg;
153  break;
154  case 'I': INPUTfile = optarg;
155  break;
156  case 'O': DOUTPUTfile = optarg;
157  break;
158  case 'T': SAVEfile = optarg;
159  break;
160  case 'N': NN_OUTPUTfile = optarg;
161  break;
162  default: break;
163  }
164  } while (opt >= 0);
165 
166 
167  /* 1. Artificial Neural Network inizialization*/
168  printf("LOADING DATA...\n");
169  if (ANN_init(&net, ANNfile)) {
170  fprintf(stdout, "Error in ANN initialization\n");
171  return 1;
172  }
173  if (TRAINING_MODE == ANN_TM_BATCH) { // ADAPTIVE LEARNING RATE
174  if (ANN_vector_matrix_init(&W1, net.N_neuron, net.N_layer)) {
175  fprintf(stderr, "Initialization error\n");
176  return 1;
177  }
178  if (ANN_vector_matrix_init(&W2, net.N_neuron, net.N_layer)) {
179  fprintf(stderr, "Initialization error\n");
180  return 1;
181  }
182  if (ANN_vector_matrix_init(&dWold, net.N_neuron, net.N_layer)) {
183  fprintf(stderr, "Initialization error\n");
184  return 1;
185  }
186  if (ANN_vector_matrix_init(&dWnew, net.N_neuron, net.N_layer)) {
187  fprintf(stderr, "Initialization error\n");
188  return 1;
189  }
190  }
191 
192  /* 2. Trainig data data acquisition*/
193  N_sample = 0;
194  if (ANN_DataRead(&INPUT, &N_sample, INPUTfile)) {
195  fprintf(stderr, "Error in Input data acquisition\n");
196  return 1;
197  }
198  if (ANN_DataRead(&DES_OUTPUT, &N_sample, DOUTPUTfile)) {
199  fprintf(stderr, "Error in Output data acquisition\n");
200  return 1;
201  }
202  if (matrix_init(&NN_OUTPUT, N_sample, net.N_output)) {
203  fprintf(stderr, "Error in NN_output matrix initialization\n");
204  return 1;
205  }
206  if (matrix_init(&DES_OUTPUT2, N_sample, net.N_output)) {
207  fprintf(stderr, "Error in NN_output matrix initialization\n");
208  return 1;
209  }
210  if (matrix_init(&INPUT2, N_sample, net.N_input)) {
211  fprintf(stderr, "Error in NN_output matrix initialization\n");
212  return 1;
213  }
214  if (vector_init(&ERR2, MAXITER)) {
215  fprintf(stderr, "Error in NN_output matrix initialization\n");
216  return 1;
217  }
218 
219  ANN_write(&net, stdout, ANN_W_A_TEXT);
220  fprintf(stdout, "TRAINING....\n");
221 
222  Niter = 0;
223  err2 = 10000000000.;
224  CNT = 0;
225  do {
226  Niter++;
227  err1 = err2;
228  if (TRAINING_MODE == ANN_TM_BATCH) {
229  if (ANN_vector_matrix_ass(&W2, &W1, net.N_neuron, net.N_layer, 1.)) {
230  fprintf(stderr, "Error in ....\n");
231  }
232  if (ANN_vector_matrix_ass(&W1, &net.W, net.N_neuron, net.N_layer, 1.)) {
233  fprintf(stderr, "Error in ....\n");
234  }
235  }
236  ANN_reset(&net);
237 
238  if (ANN_TrainingEpoch(&net, &INPUT, &DES_OUTPUT, &NN_OUTPUT, N_sample, TRAINING_MODE)) {
239  fprintf(stderr, "Error: ANN_TrainingEpoch@main ppp\n");
240  return 1;
241  }
242  if (verbose) {
243  ANN_write(&net, stdout, ANN_W_A_TEXT);
244  }
245  ANN_TotalError(&DES_OUTPUT, &NN_OUTPUT, &err2);
246 
247  /* per l'addestramento in modalit√† BATCH il tasso di apprendimento √®
248  * adattativo!!! */
249  if (TRAINING_MODE == ANN_TM_BATCH) {
250  CNT++;
251  while (err2 > err1) {
252  CNT = 0;
253  net.eta = 0.5*net.eta;
254  if (verbose) {
255  fprintf(stdout, "Network's learning rate decreasing (eta = %lf)\n", net.eta);
256  }
257  ANN_vector_matrix_ass(&net.W, &W2, net.N_neuron, net.N_layer, 1.);
258  ANN_vector_matrix_ass(&dWold, &dWold, net.N_neuron, net.N_layer, 0.5);
259  ANN_WeightUpdate(&net, dWold, 1.);
260  ANN_vector_matrix_ass(&W1, &net.W, net.N_neuron, net.N_layer, 1.);
261 
262  ANN_reset(&net);
263  if (ANN_TrainingEpoch(&net, &INPUT, &DES_OUTPUT, &NN_OUTPUT, N_sample, TRAINING_MODE)) {
264  fprintf(stderr, "Error: ANN_TrainingEpoch@main\n");
265  return 1;
266  }
267  ANN_TotalError(&DES_OUTPUT, &NN_OUTPUT, &err2);
268  }
269  ANN_vector_matrix_ass(&dWold, &net.dW, net.N_neuron, net.N_layer, 1.);
270  if (CNT == 20) {
271  net.eta = 1.1*net.eta;
272  if (verbose) {
273  fprintf(stdout, "Network's learning rate increasing (eta = %lf)\n", net.eta);
274  }
275  CNT = 0;
276  }
277  }
278  /* randimize training example */
279  /*matrix_write(&INPUT, stdout, W_M_BIN);
280  matrix_write(&DES_OUTPUT, stdout, W_M_BIN);*/
281  matrix_copy( &INPUT2, &INPUT, 1. );
282  matrix_copy( &DES_OUTPUT2, &DES_OUTPUT, 1. );
283  for ( i=0; i<N_sample; i++ ){
284  ind = floor(rand()%(N_sample-i));
285  for ( j=0; j< INPUT2.Ncolumn; j++){
286  INPUT.mat[i][j] = INPUT2.mat[ind][j];
287  INPUT2.mat[ind][j] = INPUT2.mat[N_sample-1-i][j];
288  }
289  for ( j=0; j< DES_OUTPUT2.Ncolumn; j++){
290 
291  DES_OUTPUT.mat[i][j] = DES_OUTPUT2.mat[ind][j];
292  DES_OUTPUT2.mat[ind][j] = DES_OUTPUT2.mat[N_sample-1-i][j];
293  }
294  }
295  /*matrix_write(&INPUT, stdout, W_M_BIN);
296  matrix_write(&DES_OUTPUT, stdout, W_M_BIN);
297  getchar();*/
298 
299  if (!(Niter%PRINTSTEP)) {
300  fprintf(stdout, "TRAINING: iter:%d ", Niter);
301  fprintf(stdout, "Square error: :%le\n", err2);
302  }
303 
304  if( !(Niter%SAVESTEP) ){
305  fh = fopen(SAVEfile, "w");
306  fprintf( stdout, "SAVING DATA...\n");
307  if (ANN_write( &net, fh, ANN_W_A_BIN)) {
308  fprintf(stderr, "Error in data saving\n");
309  return 1;
310  }
311  fclose(fh);
312  }
313  ERR2.vec[Niter-1] = err2;
314 
315  } while ((err2>TOLL) && (Niter<MAXITER));
316 
317 
318  fprintf(stdout, "SAVING DATA...\n");
319  fh = fopen(SAVEfile, "w");
320  if (ANN_write(&net, fh, ANN_W_A_BIN)) {
321  fprintf(stderr, "Error: ANN_save@main\n");
322  return 1;
323  }
324  fclose(fh);
325  ANN_DataWrite(&NN_OUTPUT, NN_OUTPUTfile);
326  fh = fopen("ERR.txt", "w");
327  vector_write(&ERR2,fh,W_M_BIN );
328  fclose(fh);
329 
330  /* dynamic memory free*/
331  matrix_destroy(&INPUT);
332  matrix_destroy(&DES_OUTPUT);
333  matrix_destroy(&INPUT2);
334  matrix_destroy(&DES_OUTPUT2);
335  matrix_destroy(&NN_OUTPUT);
336  vector_destroy(&ERR2);
337  if (TRAINING_MODE == ANN_TM_BATCH) {
338  for (i = 0; i < net.N_layer + 1; i++) {
339  matrix_destroy(&W1[i]);
340  matrix_destroy(&W2[i]);
341  matrix_destroy(&dWnew[i]);
342  matrix_destroy(&dWold[i]);
343  }
344  free(W1);
345  free(W2);
346  free(dWnew);
347  free(dWold);
348  }
349 
350  ANN_destroy(&net);
351  fprintf(stdout, "END.......\n");
352  return 0;
353 }
mat_res_t vector_write(vector *VEC, FILE *fh, unsigned flags)
Definition: matrix.c:487
ann_res_t ANN_DataWrite(matrix *MAT, char *FileName)
Definition: ann.c:504
Definition: matrix.h:68
ann_res_t ANN_write(ANN *net, FILE *fh, unsigned flags)
Definition: ann.c:341
ann_res_t ANN_WeightUpdate(ANN *net, ANN_vector_matrix DW, double K)
Definition: ann.c:552
ANN_vector_matrix W
Definition: ann.h:95
static void print_usage(void)
Definition: ann_tr.c:59
ann_res_t ANN_vector_matrix_ass(ANN_vector_matrix *vm1, ANN_vector_matrix *vm2, int *N_neuron, int N_layer, double K)
Definition: ann.c:883
unsigned Ncolumn
Definition: matrix.h:64
ann_res_t ANN_DataRead(matrix *MAT, int *N_sample, char *FileName)
Definition: ann.c:475
mat_res_t matrix_copy(matrix *MAT1, matrix *MAT2, double K)
Definition: matrix.c:140
static char * INPUTfile
Definition: ann_tr.c:53
mat_res_t vector_init(vector *VEC, unsigned dimension)
Definition: matrix.c:68
double eta
Definition: ann.h:91
ann_training_mode_t
Definition: ann.h:64
#define CNT
int N_output
Definition: ann.h:85
ann_res_t ANN_init(ANN *net, const char *FileName)
Definition: ann.c:48
ann_res_t ANN_vector_matrix_init(ANN_vector_matrix *vm, int *N_neuron, int N_layer)
Definition: ann.c:569
#define W_M_BIN
Definition: matrix.h:48
Definition: matrix.h:61
static int MAXITER
Definition: ann_tr.c:48
#define ANN_W_A_TEXT
Definition: ann.h:48
#define ANN_W_A_BIN
Definition: ann.h:49
ANN_vector_matrix dW
Definition: ann.h:114
ann_res_t ANN_TrainingEpoch(ANN *net, matrix *INPUT, matrix *DES_OUTPUT, matrix *NN_OUTPUT, int N_sample, ann_training_mode_t mode)
Definition: ann.c:769
mat_res_t matrix_destroy(matrix *MAT)
Definition: matrix.c:84
ann_res_t ANN_reset(ANN *net)
Definition: ann.c:828
int N_layer
Definition: ann.h:86
int N_input
Definition: ann.h:84
static char * DOUTPUTfile
Definition: ann_tr.c:54
static int SAVESTEP
Definition: ann_tr.c:50
static int PRINTSTEP
Definition: ann_tr.c:49
ann_res_t ANN_TotalError(matrix *DES_OUTPUT, matrix *NN_OUTPUT, double *err)
Definition: ann.c:861
static char * NN_OUTPUTfile
Definition: ann_tr.c:56
int * N_neuron
Definition: ann.h:87
Definition: ann.h:74
double ** mat
Definition: matrix.h:62
int getopt(int argc, char *const argv[], const char *opts)
Definition: getopt.c:93
static int verbose
Definition: ann_tr.c:51
mat_res_t vector_destroy(vector *VEC)
Definition: matrix.c:97
double * vec
Definition: matrix.h:69
static float TOLL
Definition: ann_tr.c:47
static char * ANNfile
Definition: ann_tr.c:52
char * optarg
Definition: getopt.c:74
int main(int argc, char *argv[])
Definition: ann_tr.c:92
static ann_training_mode_t TRAINING_MODE
Definition: ann_tr.c:46
mat_res_t matrix_init(matrix *MAT, unsigned Nrow, unsigned Ncolumn)
Definition: matrix.c:43
ann_res_t ANN_destroy(ANN *net)
Definition: ann.c:218
static char * SAVEfile
Definition: ann_tr.c:55