|  |  |  |
|---|---|---|
| author | Gertjan van den Burg <burg@ese.eur.nl> | 2014-03-17 11:45:57 +0100 |
| committer | Gertjan van den Burg <gertjanvandenburg@gmail.com> | 2014-05-19 22:21:34 -0700 |
| commit | b7c5d533d7fa249258c7b139996176130e7a8c80 (patch) | |
| tree | d3643b4e665e845d61ca06c5beeffed9a0230162 | |
| parent | write eigen to data structure (diff) | |
| download | gensvm-b7c5d533d7fa249258c7b139996176130e7a8c80.tar.gz gensvm-b7c5d533d7fa249258c7b139996176130e7a8c80.zip | |
work on regularization term with nonlinearity
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | include/msvmmaj.h | 3 |
| -rw-r--r-- | src/msvmmaj_kernel.c | 21 |
| -rw-r--r-- | src/msvmmaj_train.c | 18 |
| -rw-r--r-- | src/trainMSVMMaj.c | 3 |
4 files changed, 30 insertions, 15 deletions
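
In short, the commit turns the uniform ridge penalty on the rows of V into a row-weighted one, controlled by the new diagonal matrix stored in `model->J`. A sketch of the change in math, using notation that is mine rather than the commit's (v_i denotes row i of `model->V`, with i = 0 the bias row):

```latex
% before: every row of V past the bias row carries the same weight
\lambda \sum_{i=1}^{m} \| v_i \|^2
% after: row i is weighted by the diagonal entry J_{ii}
\lambda \sum_{i=1}^{m} J_{ii} \, \| v_i \|^2
```

Here J_{ii} = 1 in the linear-kernel branch and J_{ii} = 1/λ_i, the reciprocal of the i-th eigenvalue returned by `msvmmaj_make_eigen`, in the nonlinear branch. `J[0]` is never assigned and stays at zero (assuming the `Calloc` macro zero-initializes, as its name suggests), so even though the loss loop now starts at i = 0 the bias row remains unpenalized.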
```diff
diff --git a/include/msvmmaj.h b/include/msvmmaj.h
index ec3da6f..1dba211 100644
--- a/include/msvmmaj.h
+++ b/include/msvmmaj.h
@@ -36,6 +36,8 @@
  * @param *Q 		pointer to the error matrix
  * @param *H 		pointer to the Huber weighted error matrix
  * @param *R 		pointer to the 0-1 auxiliary matrix
+ * @param *J 		pointer to the diagonal matrix in the
+ * 			regularization term
  * @param *rho 		pointer to the instance weight vector
  * @param training_error 	error after training has completed
  * @param *data_file 	pointer to the filename of the data
@@ -63,6 +65,7 @@ struct MajModel {
 	double *Q;
 	double *H;
 	double *R;
+	double *J;
 	double *rho;
 	double training_error;
 	char *data_file;
diff --git a/src/msvmmaj_kernel.c b/src/msvmmaj_kernel.c
index fc699dd..5ac138c 100644
--- a/src/msvmmaj_kernel.c
+++ b/src/msvmmaj_kernel.c
@@ -31,14 +31,18 @@
  */
 void msvmmaj_make_kernel(struct MajModel *model, struct MajData *data)
 {
-	if (model->kerneltype == K_LINEAR)
+	long i, j;
+	if (model->kerneltype == K_LINEAR) {
+		model->J = Calloc(double, model->m+1);
+		for (i=1; i<model->m+1; i++)
+			matrix_set(model->J, 1, i, 0, 1.0);
 		return;
+	}
 
-	long i, j;
 	long n = model->n;
 	double value;
 	double *x1, *x2;
-	double *K = Calloc(double, n*n*sizeof(double));
+	double *K = Calloc(double, n*n);
 
 	for (i=0; i<n; i++) {
 		for (j=i; j<n; j++) {
@@ -63,8 +67,6 @@ void msvmmaj_make_kernel(struct MajModel *model, struct MajData *data)
 		}
 	}
 
-	print_matrix(K, n, n);
-
 	double *P = Malloc(double, n*n);
 	double *Lambda = Malloc(double, n);
 	long num_eigen = msvmmaj_make_eigen(K, n, P, Lambda);
@@ -79,6 +81,13 @@ void msvmmaj_make_kernel(struct MajModel *model, struct MajData *data)
 	}
 	data->m = n;
 
+	// Set the regularization matrix (change if not full rank used)
+	model->J = Calloc(double, model->m+1);
+	for (i=1; i<model->m+1; i++) {
+		value = 1.0/matrix_get(Lambda, 1, i-1, 0);
+		matrix_set(model->J, 1, i, 0, value);
+	}
+
 	// let data know what it's made of
 	data->kerneltype = model->kerneltype;
 	free(data->kernelparam);
@@ -192,8 +201,6 @@ long msvmmaj_make_eigen(double *K, long n, double *P, double *Lambda)
 		for (j=0; j<n; j++)
 			P[i*n+j] = tempP[j*n+i];
 
-	print_matrix(P, n, n);
-
 	free(tempP);
 
 	// replace by number of columns of P
diff --git a/src/msvmmaj_train.c b/src/msvmmaj_train.c
index bbd2663..5018c3f 100644
--- a/src/msvmmaj_train.c
+++ b/src/msvmmaj_train.c
@@ -87,7 +87,7 @@ void msvmmaj_optimize(struct MajModel *model, struct MajData *data)
 		Lbar = L;
 		L = msvmmaj_get_loss(model, data, ZV);
 
-		if (it%50 == 0)
+		if (it%1 == 0)
 			note("iter = %li, L = %15.16f, Lbar = %15.16f, "
 				"reldiff = %15.16f\n", it, L, Lbar, (Lbar - L)/L);
 		it++;
@@ -156,10 +156,12 @@ double msvmmaj_get_loss(struct MajModel *model, struct MajData *data,
 	loss /= ((double) n);
 
 	value = 0;
-	for (i=1; i<m+1; i++) {
+	for (i=0; i<m+1; i++) {
+		rowvalue = 0;
 		for (j=0; j<K-1; j++) {
-			value += pow(matrix_get(model->V, K-1, i, j), 2.0);
+			rowvalue += pow(matrix_get(model->V, K-1, i, j), 2.0);
 		}
+		value += model->J[i] * rowvalue;
 	}
 	loss += model->lambda * value;
 
@@ -421,12 +423,14 @@ void msvmmaj_get_update(struct MajModel *model, struct MajData *data, double *B,
 			ZAZV, K-1);
 
 	/*
-	 * Add lambda to all diagonal elements except the
-	 * first one.
+	 * Add lambda to all diagonal elements except the first one. Recall
+	 * that ZAZ is of size m+1 and is symmetric.
 	 */
 	i = 0;
-	for (j=0; j<m; j++)
-		ZAZ[i+=m+1 + 1] += model->lambda;
+	for (j=0; j<m; j++) {
+		i += (m+1) + 1;
+		ZAZ[i] += model->lambda * model->J[j+1];
+	}
 
 	// For the LAPACK call we need to switch to Column-
 	// Major order. This is unnecessary for the matrix
diff --git a/src/trainMSVMMaj.c b/src/trainMSVMMaj.c
index 66f6450..af91eaf 100644
--- a/src/trainMSVMMaj.c
+++ b/src/trainMSVMMaj.c
@@ -105,7 +105,8 @@ int main(int argc, char **argv)
 
 	// seed the random number generator (only place in programs is in
 	// command line interfaces)
-	srand(time(NULL));
+	//srand(time(NULL));
+	srand(123456);
 
 	if (msvmmaj_check_argv_eq(argc, argv, "-m")) {
 		struct MajModel *seed_model = msvmmaj_init_model();
```
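
The least obvious piece above is the diagonal update in `msvmmaj_get_update`: `ZAZ` is a row-major (m+1)×(m+1) matrix, so adding `(m+1) + 1` to a flat index moves one step down its diagonal, and starting from `i = 0` with the increment before the assignment skips the (0,0) entry. Below is a minimal standalone sketch of that indexing; the size, λ, and the `J` weights are made up here, and it does not use the project's helpers.

```c
#include <stdio.h>
#include <stdlib.h>

/* Standalone illustration of the diagonal update in msvmmaj_get_update:
 * for a row-major (m+1)x(m+1) matrix, element (r, r) lives at flat index
 * r*(m+1) + r, so adding (m+1) + 1 moves one step down the diagonal.
 * Starting from i = 0 and incrementing before use skips the (0,0) entry,
 * which corresponds to the unpenalized bias row.
 */
int main(void)
{
	long m = 3;                           /* made-up size */
	long size = (m+1) * (m+1);
	double lambda = 0.5;                  /* made-up regularization parameter */
	double J[] = {0.0, 1.0, 0.5, 0.25};   /* made-up diagonal weights, J[0] unused */
	long i, j;

	double *ZAZ = calloc(size, sizeof(double));
	if (ZAZ == NULL)
		return EXIT_FAILURE;

	i = 0;
	for (j=0; j<m; j++) {
		i += (m+1) + 1;               /* next diagonal element, skipping (0,0) */
		ZAZ[i] += lambda * J[j+1];
	}

	/* print the diagonal to show that only entries (1,1)..(m,m) changed */
	for (j=0; j<m+1; j++)
		printf("ZAZ[%ld][%ld] = %g\n", j, j, ZAZ[j*(m+1) + j]);

	free(ZAZ);
	return EXIT_SUCCESS;
}
```

Run on a zeroed matrix this prints 0 for the (0,0) entry and λ·J[j+1] for the remaining diagonal entries, which is the same pattern the new loop applies to `ZAZ` before the LAPACK solve.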
