From 203ee5997bf80d4386b7b9fcd17365763c36e0ad Mon Sep 17 00:00:00 2001
From: Gertjan van den Burg
Date: Mon, 5 Aug 2013 17:21:36 +0200
Subject: initial commit

---
 src/libMSVMMaj.c   | 651 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 src/libMSVMMaj.o   | Bin 0 -> 67640 bytes
 src/trainMSVMMaj.c | 121 ++++++++++
 src/util.c         | 311 +++++++++++++++++++++++++
 src/util.o         | Bin 0 -> 40096 bytes
 5 files changed, 1083 insertions(+)
 create mode 100644 src/libMSVMMaj.c
 create mode 100644 src/libMSVMMaj.o
 create mode 100644 src/trainMSVMMaj.c
 create mode 100644 src/util.c
 create mode 100644 src/util.o

diff --git a/src/libMSVMMaj.c b/src/libMSVMMaj.c
new file mode 100644
index 0000000..3dc5e46
--- /dev/null
+++ b/src/libMSVMMaj.c
@@ -0,0 +1,651 @@
+#include "libMSVMMaj.h"
+
+/*
+	Generate the simplex matrix. A pointer to the matrix
+	must be given, and the matrix must have been
+	allocated.
+*/
+void simplex_gen(long K, double *U)
+{
+	long i, j;
+	for (i=0; in;
+	long K = model->K;
+
+	for (i=0; i<n; i++) {
+		for (j=0; j<K; j++) {
+			if (data->y[i] != j+1) {
+				matrix_set(model->R, K, i, j, 1.0);
+			}
+		}
+	}
+}
+
+void simplex_diff(struct Model *model, struct Data *data)
+{
+	long i, j, k;
+	double value;
+
+	long n = model->n;
+	long K = model->K;
+
+	for (i=0; i<n; i++) {
+		for (j=0; j<K-1; j++) {
+			for (k=0; k<K; k++) {
+				value = matrix_get(model->U, K-1, data->y[i]-1, j);
+				value -= matrix_get(model->U, K-1, k, j);
+				matrix3_set(model->UU, K-1, K, i, j, k, value);
+			}
+		}
+	}
+}
+
+/*
+	Calculate the errors Q based on the current value of V.
+	It is assumed that the memory for Q has already been allocated.
+	In addition, the matrix ZV is calculated here. It is assigned to a
+	pre-allocated block of memory, since it would be inefficient to keep
+	reassigning this block at every iteration.
+*/
+void calculate_errors(struct Model *model, struct Data *data, double *ZV)
+{
+	long i, j, k;
+	double a, value;
+
+	long n = model->n;
+	long m = model->m;
+	long K = model->K;
+
+	//info("\t\tCalculating ZV ... ");
+	cblas_dgemm(
+		CblasRowMajor,
+		CblasNoTrans,
+		CblasNoTrans,
+		n,
+		K-1,
+		m+1,
+		1.0,
+		data->Z,
+		m+1,
+		model->V,
+		K-1,
+		0.0,
+		ZV,
+		K-1);
+	//info("done\n");
+	Memset(model->Q, double, n*K);
+
+	//info("\t\tCalculating qs ... ");
+	for (i=0; i<n; i++) {
+		for (j=0; j<K-1; j++) {
+			a = matrix_get(ZV, K-1, i, j);
+			for (k=0; k<K; k++) {
+				value = a*matrix3_get(model->UU, K-1, K, i, j, k);
+				matrix_add(model->Q, K, i, k, value);
+			}
+		}
+	}
+	//info("done\n");
+}
+
+/*
+	Calculate the Huber hinge errors for each error in the matrix Q.
+*/
+void calculate_huber(struct Model *model)
+{
+	long i, j;
+	double q, value;
+
+	for (i=0; i<model->n; i++) {
+		for (j=0; j<model->K; j++) {
+			q = matrix_get(model->Q, model->K, i, j);
+			value = 0.0;
+			if (q <= -model->kappa) {
+				value = 1.0 - q - (model->kappa+1.0)/2.0;
+			} else if (q <= 1.0) {
+				value = 1.0/(2.0*model->kappa+2.0)*pow(1.0 - q, 2.0);
+			}
+			matrix_set(model->H, model->K, i, j, value);
+		}
+	}
+}
+
+/*
+	Calculate the value of the loss function based on the current estimate
+	of V.
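+
+	A sketch of the objective this function is meant to evaluate, written
+	in the notation of the code below (reader's reading, hedged; not the
+	author's own description):
+
+		L(V) = 1/n * sum_i rho_i * ( sum_j R_ij * h(q_ij)^p )^(1/p)
+		       + lambda * sum_{t>=1} sum_j V_tj^2,
+
+	where h() is the Huber-weighted hinge computed in calculate_huber():
+
+		h(q) = 1 - q - (kappa+1)/2         if q <= -kappa,
+		h(q) = (1 - q)^2 / (2*kappa + 2)   if -kappa < q <= 1,
+		h(q) = 0                           otherwise,
+
+	and the first row of V (the translation term) is excluded from the
+	regularization sum.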
+*/ +double get_msvmmaj_loss(struct Model *model, struct Data *data, double *ZV) +{ + long i, j; + long n = data->n; + long K = data->K; + long m = data->m; + + double value, rowvalue, loss = 0.0; + + //info("\tCalculating errors\n"); + calculate_errors(model, data, ZV); + //info("\tCalculating Hubers\n"); + calculate_huber(model); + + for (i=0; iH, K, i, j); + value = pow(value, model->p); + value *= matrix_get(model->R, K, i, j); + } + rowvalue += value; + rowvalue = pow(rowvalue, 1.0/(model->p)); + rowvalue *= model->rho[i]; + loss += rowvalue; + } + loss /= ((double) n); + + value = 0; + for (i=1; iV, K-1, i, j), 2.0); + } + } + loss += model->lambda * value; + + return loss; +} + + +/* + Training loop is defined here. +*/ +void main_loop(struct Model *model, struct Data *data) +{ + long i, j, it = 0; + double L, Lbar; + + long n = model->n; + long m = model->m; + long K = model->K; + + srand(time(NULL)); + + int *ClassIdx = Malloc(int, n); + double *A = Malloc(double, n); + double *B = Malloc(double, n*(K-1)); + double *ZV = Malloc(double, n*(K-1)); + double *ZAZ = Malloc(double, (m+1)*(m+1)); + double *ZAZV = Malloc(double, (m+1)*(K-1)); + double *ZAZVT = Malloc(double, (m+1)*(K-1)); + double *Omega = Malloc(double, n); + + Memset(ClassIdx, int, n); + Memset(A, double, n); + Memset(B, double, n*(K-1)); + Memset(ZV, double, n*(K-1)); + Memset(ZAZ, double, (m+1)*(m+1)); + Memset(ZAZV, double, (m+1)*(K-1)); + Memset(ZAZVT, double, (m+1)*(K-1)); + Memset(Omega, double, n); + + info("Starting main loop.\n"); + info("Dataset:\n"); + info("\tn = %i\n", n); + info("\tm = %i\n", m); + info("\tK = %i\n", K); + info("Parameters:\n"); + info("\tkappa = %f\n", model->kappa); + info("\tp = %f\n", model->p); + info("\tlambda = %15.16f\n", model->lambda); + info("\n"); + + info("Z:\n"); + print_matrix(data->Z, n, m+1); + + //info("Generating simplex\n"); + simplex_gen(model->K, model->U); + //info("Generating simplex diff\n"); + simplex_diff(model, data); + //info("Generating category matrix\n"); + category_matrix(model, data); + + // Initialize V + //info("Initializing V\n"); + for (i=0; iV, K-1, i, j, -1.0+2.0*rnd()); + matrix_set(model->V, K-1, i, j, 1.0); + + //info("Getting initial loss\n"); + L = get_msvmmaj_loss(model, data, ZV); + Lbar = L + 2.0*model->epsilon*L; + + while ((it < MAX_ITER) && (Lbar - L)/L > model->epsilon) + { + info("################## Before %i ################\n", it); + info("V:\n"); + print_matrix(model->V, m+1, K-1); + info("Vbar:\n"); + print_matrix(model->Vbar, m+1, K-1); + info("Q:\n"); + print_matrix(model->Q, n, K); + info("H:\n"); + print_matrix(model->H, n, K); + info("ZV:\n"); + print_matrix(ZV, n, K-1); + info("ClassIdx:\n"); + for (i=0; iV, m+1, K-1); + info("Vbar:\n"); + print_matrix(model->Vbar, m+1, K-1); + info("Q:\n"); + print_matrix(model->Q, n, K); + info("H:\n"); + print_matrix(model->H, n, K); + info("ZV:\n"); + print_matrix(ZV, n, K-1); + info("ClassIdx:\n"); + for (i=0; i 50) + step_doubling(model); + + Lbar = L; + L = get_msvmmaj_loss(model, data, ZV); + + if (it%1 == 0) + info("iter = %li, L = %15.16f, Lbar = %15.16f, reldiff = %15.16f\n", + it, L, Lbar, (Lbar - L)/L); + it++; + } + + info("optimization finished, iter = %li\n", it-1); + + for (i=0; it[i] = matrix_get(model->V, K-1, 0, i); + for (i=1; iW, K-1, i-1, j, matrix_get(model->V, K-1, i, j)); + + info("I'm going to free some stuff ... 
"); + info("0"); + free(ClassIdx); + info("1"); + free(A); + info("2"); + free(B); + info("3"); + free(Omega); + info("4"); + free(ZV); + info("5"); + free(ZAZ); + info("6"); + free(ZAZV); + info("7"); + free(ZAZVT); + info("8"); + info(" stuff free.\n"); + +} + +void step_doubling(struct Model *model) +{ + long i, j; + + long m = model->m; + long K = model->K; + + for (i=0; iV, K-1, i, j, 2.0); + matrix_add(model->V, K-1, i, j, -matrix_get(model->Vbar, K-1, i, j)); + } + } +} + +int dposv(char UPLO, int N, int NRHS, double *A, int LDA, double *B, + int LDB) +{ + extern void dposv_(char *UPLO, int *Np, int *NRHSp, double *A, + int *LDAp, double *B, int *LDBp, int *INFOp); + int INFO; + dposv_(&UPLO, &N, &NRHS, A, &LDA, B, &LDB, &INFO); + return INFO; +} + +int dsysv(char UPLO, int N, int NRHS, double *A, int LDA, int *IPIV, + double *B, int LDB, double *WORK, int LWORK) +{ + extern void dsysv_(char *UPLO, int *Np, int *NRHSp, double *A, + int *LDAp, int *IPIV, double *B, int *LDBp, + double *WORK, int *LWORK, int *INFOp); + int INFO; + dsysv_(&UPLO, &N, &NRHS, A, &LDA, IPIV, B, &LDB, WORK, &LWORK, &INFO); + return INFO; +} + +void msvmmaj_update(struct Model *model, struct Data *data, + int *ClassIdx, double *A, double *B, double *Omega, + double *ZAZ, double *ZAZV, double *ZAZVT) +{ + // Because msvmmaj_update is always called after a call to + // get_msvmmaj_loss with the latest V, it is unnecessary to recalculate + // the matrix ZV, the errors Q and the Huber errors H. Awesome! + int status; + long i, j, k; + double Avalue, Bvalue; + double omega, value, a, b, q, h, r; + + long n = model->n; + long m = model->m; + long K = model->K; + + double kappa = model->kappa; + double p = model->p; + double *rho = model->rho; + + const double a2g2 = 0.25*p*(2.0*p - 1.0)*pow((kappa+1.0)/2.0,p-2.0); + const double in = 1.0/((double) n); + + //info("\tCalculating class idx and omega ... "); + for (i=0; iH, K, i, j); + r = matrix_get(model->R, K, i, j); + value += (h*r > 0) ? 1 : 0; + omega += pow(h, p)*r; + } + ClassIdx[i] = (value <= 1.0) ? 1 : 0; + Omega[i] = (1.0/p)*pow(omega, 1.0/p - 1.0); + } + + //info("done\n"); + //info("\tCalculating A and B ... 
"); + + Memset(B, double, n*(K-1)); + for (i=0; iQ, K, i, j); + if (q < -kappa) { + a = 0.25/(0.5 - kappa/2.0 - q); + b = 0.5; + } else if (q <= 1.0) { + a = 1.0/(2.0*kappa + 2.0); + b = (1.0 - q)*a; + } else { + a = -0.25/(0.5 - kappa/2.0 - q); + b = 0; + } + for (k=0; kUU, K-1, K, i, k, j); + matrix_add(B, K-1, i, k, Bvalue); + } + Avalue += a*matrix_get(model->R, K, i, j); + } + } else { + if (2.0 - p < 0.0001) { + for (j=0; jQ, K, i, j); + if (q < -kappa) { + b = 0.5*p*pow(0.5 - kappa/2.0 - q, p-1.0); + } else if ( q <= 1.0) { + b = pow(1.0 - q, 3.0)/pow(2.0*kappa + 2.0, 2.0); + } else { + b = 0; + } + for (k=0; kUU, K-1, K, i, k, j); + matrix_add(B, K-1, i, k, Bvalue); + } + } + Avalue = 1.5*(K - 1.0); + } else { + for (j=0; jQ, K, i, j); + if (q < -kappa) { + a = 0.25*pow(p, 2.0)*pow(0.5 - kappa/2.0 - q, p - 2.0); + b = 0.5*p*pow(0.5 - kappa/2.0 - q, p - 1.0); + } else if ( q <= 1.0) { + a = a2g2; + b = p*pow(1.0 - q, 2.0*p - 1.0)/pow(2*kappa+2.0, p); + } else { + a = 0.25*pow(p, 2.0)*pow((p/(p - 2.0))*(0.5 - kappa/2.0 - q),p - 2.0); + b = a*(2.0*q + kappa - 1.0)/(p - 2.0) + 0.5*p*pow((p/(p - 2.0))*(0.5 - kappa/2.0 - q), p - 1.0); + } + for (k=0; kUU, K-1, K, i, k, j); + matrix_add(B, K-1, i, k, Bvalue); + } + Avalue += a*matrix_get(model->R, K, i, j); + } + } + Avalue *= Omega[i]; + } + A[i] = in*rho[i]*Avalue; + } + + //print_matrix(ZAZ, m+1, m+1); + //info("done\n"); + //info("\tCalculating ZAZ ... "); + // Now we calculate the matrix ZAZ. Since this is + // guaranteed to be symmetric, we only calculate the + // upper part of the matrix, and then copy this over + // to the lower part after all calculations are done. + // Note that the use of dsym is faster than dspr, even + // though dspr uses less memory. + Memset(ZAZ, double, (m+1)*(m+1)); + for (i=0; iZ[i*(m+1)], + 1, + ZAZ, + m+1); + } + //print_matrix(ZAZ, m+1, m+1); + + //info("done\n"); + // Copy upper to lower (necessary because we need to switch + // to Col-Major order for LAPACK). + // + // TODO: this needs verification! It might also work without + // this step + for (i=0; iV, + K-1, + 0.0, + ZAZV, + K-1); + //info("done\n"); + //info("\tRunning dgemm ... "); + cblas_dgemm( + CblasRowMajor, + CblasTrans, + CblasNoTrans, + m+1, + K-1, + n, + 1.0, + data->Z, + m+1, + B, + K-1, + 1.0, + ZAZV, + K-1); + /* + * Add lambda to all diagonal elements except the + * first one. + */ + i = 0; + for (j=0; jlambda; + + // For the LAPACK call we need to switch to Column- + // Major order. This is unnecessary for the matrix + // ZAZ because it is symmetric. The matrix ZAZV + // must be converted however. 
+ for (i=0; iVbar; + model->Vbar = model->V; + model->V = ZAZVT; + ZAZVT = ptr; + */ + + for (i=0; iVbar, K-1, i, j); + matrix_set(model->Vbar, K-1, i, j, matrix_get(model->V, K-1, i, j)); + matrix_set(model->V, K-1, i, j, matrix_get(ZAZV, K-1, i, j)); + matrix_set(ZAZV, K-1, i, j, value); + } + } + +} + + +void initialize_weights(struct Data *data, struct Model *model) +{ + int *groups; + long i; + + long n = model->n; + long K = model->K; + + if (model->weight_idx == 1) { + for (i=0; irho[i] = 1.0; + } + else if (model->weight_idx == 2) { + groups = Malloc(int, K); + for (i=0; iy[i]-1]++; + } + for (i=0; irho[i] = 1.0/((double) groups[data->y[i]-1]); + } + } else { + fprintf(stderr, "Unknown weight specification.\n"); + exit(1); + } +} + diff --git a/src/libMSVMMaj.o b/src/libMSVMMaj.o new file mode 100644 index 0000000..d78f60d Binary files /dev/null and b/src/libMSVMMaj.o differ diff --git a/src/trainMSVMMaj.c b/src/trainMSVMMaj.c new file mode 100644 index 0000000..cf31b63 --- /dev/null +++ b/src/trainMSVMMaj.c @@ -0,0 +1,121 @@ +#include "libMSVMMaj.h" + +#define MINARGS 2 + +void print_null(const char *s) {} + +void parse_command_line(int argc, char **argv, char *input_filename, struct Model *model); +void exit_with_help(); + +void exit_with_help() +{ + printf("This is MSVMMaj, version %1.1f\n\n", VERSION); + printf("Usage: trainMSVMMaj [options] training_data_file [output_file]\n"); + printf("Options:\n"); + printf("-p p-value : set the value of p in the lp norm (1.0 <= p <= 2.0)\n"); + printf("-l lambda : set the value of lambda (lambda > 0)\n"); + printf("-e epsilon : set the value of the stopping criterion\n"); + printf("-k kappa : set the value of kappa used in the Huber hinge\n"); + printf("-r rho : choose the weigth specification (1 = unit, 2 = group)\n"); + printf("-q : quiet mode (no output)\n"); + printf("-h | -help : print this help.\n"); + + exit(0); +} + + +/* + Main +*/ +int main(int argc, char **argv) +{ + char input_filename[MAX_LINE_LENGTH]; + struct Model *model = Malloc(struct Model, 1); + model->n = 0; + model->K = 0; + model->m = 0; + struct Data *data = Malloc(struct Data, 1); + + if (argc < MINARGS || check_argv(argc, argv, "-help") || check_argv_eq(argc, argv, "-h") ) { + exit_with_help(); + } + parse_command_line(argc, argv, input_filename, model); + + // read data and allocate all memory for the model + read_data(data, model, input_filename); + allocate_model(model); + initialize_weights(data, model); + + main_loop(model, data); + + // free model and data + free_model(model); + free_data(data); + + return 0; + +} + +void parse_command_line(int argc, char **argv, char *input_filename, struct Model *model) +{ + int i; + void (*print_func)(const char*) = NULL; + + // default values + model->p = 1.0; + model->lambda = pow(2, -8.0); + model->epsilon = 1e-6; + model->kappa = 0.0; + model->weight_idx = 1; + + // parse options + for (i=1; i=argc) { + exit_with_help(); + } + switch (argv[i-1][1]) { + case 'p': + model->p = atof(argv[i]); + break; + case 'l': + model->lambda = atof(argv[i]); + break; + case 'e': + model->epsilon = atof(argv[i]); + break; + case 'k': + model->kappa = atof(argv[i]); + break; + case 'r': + model->weight_idx = atoi(argv[i]); + break; + case 'q': + print_func = &print_null; + i--; + break; + default: + fprintf(stderr, "Unknown option: -%c\n", argv[i-1][1]); + exit_with_help(); + } + } + + // set print function + set_print_string_function(print_func); + + // read input filename + if (i >= argc) + exit_with_help(); + + 
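+	// Example invocation (the file name is purely illustrative):
+	//
+	//	trainMSVMMaj -p 1.5 -l 0.01 -k 0.5 -e 1e-6 -r 1 train.dat
+	//
+	// i.e. an lp norm with p = 1.5, lambda = 0.01, Huber hinge
+	// parameter kappa = 0.5, stopping tolerance 1e-6 and unit instance
+	// weights. With no options given, the defaults set above are used
+	// (p = 1.0, lambda = 2^-8, epsilon = 1e-6, kappa = 0.0, unit
+	// weights).
+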
strcpy(input_filename, argv[i]); +} + + + + + + + + + + diff --git a/src/util.c b/src/util.c new file mode 100644 index 0000000..8632733 --- /dev/null +++ b/src/util.c @@ -0,0 +1,311 @@ +#include "util.h" + +/* + Read the data from the data_file. The data matrix X is augmented + with a column of ones, to get the matrix Z. +*/ +void read_data(struct Data *dataset, struct Model *model, char *data_file) +{ + FILE *fid; + long i, j; + long n, m; // dimensions of data + long nr = 0; // used to check consistency of data + double value; + long K = 0; + long min_y = 1000; + + char buf[MAX_LINE_LENGTH]; + + if ((fid = fopen(data_file, "r")) == NULL) { + printf("\nERROR: datafile %s could not be opened.\n", + data_file); + exit(0); + } + + // Read data dimensions + nr += fscanf(fid, "%ld", &n); + nr += fscanf(fid, "%ld", &m); + + // Allocate memory + dataset->Z = Malloc(double, n*(m+1)); + + // Read first line of data + for (j=1; jZ, n, 0, j, value); + } + + // Check if there is a label at the end of the line + if (fgets(buf, MAX_LINE_LENGTH, fid) == NULL) { + printf("ERROR: No label found on first line.\n"); + exit(1); + } + if (sscanf(buf, "%lf", &value) > 0) { + dataset->y = Malloc(long, n); + dataset->y[0] = value; + } else if (dataset->y != NULL) { + free(dataset->y); + dataset->y = NULL; + } + + // Read the rest of the file + for (i=1; iZ, m+1, i, j, value); + } + if (dataset->y != NULL) { + nr += fscanf(fid, "%lf", &value); + dataset->y[i] = (long) value; + K = maximum(K, value); + min_y = minimum(min_y, value); + } + } + fclose(fid); + + // Correct labels: must be in [1, K] + if (min_y == 0) { + for (i=0; iy[i]++; + } else if (min_y < 0 ) { + printf("ERROR: wrong class labels in %s, minimum value is: %ld\n", + data_file, min_y); + exit(0); + } + + if (nr < n * m) { + printf("ERROR: not enough data found in %s\n", data_file); + exit(0); + } + + // Set the column of ones + for (i=0; iZ, m+1, i, 0, 1.0); + + dataset->n = n; + dataset->m = m; + dataset->K = K; + + model->n = n; + model->m = m; + model->K = K; + + info("Succesfully read data file: %s\n", data_file); +} + +int check_argv(int argc, char **argv, char *str) +{ + int i; + int arg_str = 0; + for (i=1; in; + long m = model->m; + long K = model->K; + + model->W = Calloc(double, m*(K-1)); + if (model->W == NULL) { + fprintf(stderr, "Failed to allocate memory for W.\n"); + exit(1); + } + + model->t = Calloc(double, K-1); + if (model->t == NULL) { + fprintf(stderr, "Failed to allocate memory for t.\n"); + exit(1); + } + + model->V = Calloc(double, (m+1)*(K-1)); + if (model->V == NULL) { + fprintf(stderr, "Failed to allocate memory for V.\n"); + exit(1); + } + + model->Vbar = Calloc(double, (m+1)*(K-1)); + if (model->Vbar == NULL) { + fprintf(stderr, "Failed to allocate memory for Vbar.\n"); + exit(1); + } + + model->U = Calloc(double, K*(K-1)); + if (model->U == NULL) { + fprintf(stderr, "Failed to allocate memory for U.\n"); + exit(1); + } + + model->UU = Calloc(double, n*K*(K-1)); + if (model->UU == NULL) { + fprintf(stderr, "Failed to allocate memory for UU.\n"); + exit(1); + } + + model->Q = Calloc(double, n*K); + if (model->Q == NULL) { + fprintf(stderr, "Failed to allocate memory for Q.\n"); + exit(1); + } + + model->H = Calloc(double, n*K); + if (model->H == NULL) { + fprintf(stderr, "Failed to allocate memory for H.\n"); + exit(1); + } + + model->R = Calloc(double, n*K); + if (model->R == NULL) { + fprintf(stderr, "Failed to allocate memory for R.\n"); + exit(1); + } + + model->rho = Calloc(double, n); + if (model->rho == NULL) { + 
fprintf(stderr, "Failed to allocate memory for rho.\n"); + exit(1); + } + +} + +void free_model(struct Model *model) +{ + free(model->W); + free(model->t); + free(model->V); + free(model->Vbar); + free(model->U); + free(model->UU); + free(model->Q); + free(model->H); + free(model->rho); + free(model->R); + + free(model); +} + +void free_data(struct Data *data) +{ + free(data->Z); + free(data->y); + free(data); +} + +void print_matrix(double *M, long rows, long cols) +{ + long i, j; + for (i=0; i
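
Note: the code in this patch relies on matrix-access and allocation macros (matrix_set, matrix_get, matrix_add, matrix3_set, matrix3_get, Malloc, Calloc, Memset, rnd) that are declared in the headers libMSVMMaj.h and util.h, which are not part of this excerpt. The snippet below is a minimal sketch of definitions that are consistent with how the macros are used above; it is an assumption for illustration, and the actual header definitions may differ.

#include <stdlib.h>
#include <string.h>

/* Allocation helpers, inferred from calls such as Malloc(double, n*(K-1))
 * and Memset(model->Q, double, n*K). Hypothetical reconstruction. */
#define Malloc(type, n)      ((type *) malloc((n) * sizeof(type)))
#define Calloc(type, n)      ((type *) calloc((n), sizeof(type)))
#define Memset(ptr, type, n) memset((ptr), 0, (n) * sizeof(type))

/* Row-major access to a flat array with `cols` columns. */
#define matrix_set(M, cols, i, j, val) ((M)[(i)*(cols) + (j)] = (val))
#define matrix_get(M, cols, i, j)      ((M)[(i)*(cols) + (j)])
#define matrix_add(M, cols, i, j, val) ((M)[(i)*(cols) + (j)] += (val))

/* Three-dimensional variant used for UU (n x (K-1) x K); d2 and d3 are
 * the sizes of the second and third dimensions. */
#define matrix3_set(M, d2, d3, i, j, k, val) \
	((M)[((i)*(d2) + (j))*(d3) + (k)] = (val))
#define matrix3_get(M, d2, d3, i, j, k) \
	((M)[((i)*(d2) + (j))*(d3) + (k)])

/* Uniform random draw in [0, 1), used to initialize V in main_loop(). */
#define rnd() ((double) rand() / ((double) RAND_MAX + 1.0))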