| author | Gertjan van den Burg <gertjanvandenburg@gmail.com> | 2018-03-30 21:22:26 +0100 |
|---|---|---|
| committer | Gertjan van den Burg <gertjanvandenburg@gmail.com> | 2018-03-30 21:22:26 +0100 |
| commit | 93115020ec89c7f549ef5dab50e5270b09830894 (patch) | |
| tree | 32d2af78bd670b196e9e51d07c0efd245aee69d6 | |
| parent | Get column names from x directly (diff) | |
| download | rgensvm-93115020ec89c7f549ef5dab50e5270b09830894.tar.gz rgensvm-93115020ec89c7f549ef5dab50e5270b09830894.zip | |
Update docs and remove xtrain from output list
| -rw-r--r-- | R/gensvm.R | 10 |
| -rw-r--r-- | man/gensvm.Rd | 10 |
2 files changed, 9 insertions, 11 deletions
diff --git a/R/gensvm.R b/R/gensvm.R
@@ -4,7 +4,10 @@
 #' with the given parameters. See the package documentation
 #' (\code{\link{gensvm-package}}) for more general information about GenSVM.
 #'
-#' @param X data matrix with the predictors
+#' @param x data matrix with the predictors. \cr\cr
+#' Note that for SVMs categorical features should be converted to binary dummy
+#' features. This can be done with using the \code{\link{model.matrix}}
+#' function (i.e. \code{model.matrix( ~ var - 1)}).
 #' @param y class labels
 #' @param p parameter for the L_p norm of the loss function (1.0 <= p <= 2.0)
 #' @param lambda regularization parameter for the loss function (lambda > 0)
@@ -56,9 +59,6 @@
 #' \item{n.iter}{Number of iterations performed in training}
 #' \item{n.support}{Number of support vectors in the final model}
 #' \item{training.time}{Total training time}
-#' \item{X.train}{When training with nonlinear kernels, the training data is
-#' needed to perform prediction. For these kernels it is therefore stored in
-#' the fitted model.}
 #'
 #' @note
 #' This function returns partial results when the computation is interrupted by
@@ -182,8 +182,6 @@ gensvm <- function(X, y, p=1.0, lambda=1e-8, kappa=0.0, epsilon=1e-6,
                    classes = classes, V = out$V,
                    n.iter = out$n.iter, n.support = out$n.support,
                    training.time = out$training.time,
-                   X.train = if(kernel == 'linear') NULL else X,
-
                    feature.names = colnames(X))
     class(object) <- "gensvm"
     return(object)
diff --git a/man/gensvm.Rd b/man/gensvm.Rd
index 7a55bd6..5aabcaa 100644
--- a/man/gensvm.Rd
+++ b/man/gensvm.Rd
@@ -4,13 +4,16 @@
 \alias{gensvm}
 \title{Fit the GenSVM model}
 \usage{
-gensvm(X, y, p = 1, lambda = 1e-08, kappa = 0, epsilon = 1e-06,
+gensvm(x, y, p = 1, lambda = 1e-08, kappa = 0, epsilon = 1e-06,
   weights = "unit", kernel = "linear", gamma = "auto", coef = 1,
   degree = 2, kernel.eigen.cutoff = 1e-08, verbose = FALSE,
   random.seed = NULL, max.iter = 1e+08, seed.V = NULL)
 }
 \arguments{
-\item{X}{data matrix with the predictors}
+\item{x}{data matrix with the predictors. \cr\cr
+Note that for SVMs categorical features should be converted to binary dummy
+features. This can be done with using the \code{\link{model.matrix}}
+function (i.e. \code{model.matrix( ~ var - 1)}).}

 \item{y}{class labels}

@@ -77,9 +80,6 @@ eigendecomposition of the kernel matrix.}
 \item{n.iter}{Number of iterations performed in training}
 \item{n.support}{Number of support vectors in the final model}
 \item{training.time}{Total training time}
-\item{X.train}{When training with nonlinear kernels, the training data is
-needed to perform prediction. For these kernels it is therefore stored in
-the fitted model.}
 }
 \description{
 Fits the Generalized Multiclass Support Vector Machine model
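The documentation added in this commit points users to \code{model.matrix} for dummy-coding categorical predictors. A minimal sketch of that preprocessing step, assuming a made-up data frame with a single factor column named `color` (the column name and data are illustrative, not from the package):

```r
# Toy data frame with one categorical predictor (illustrative only)
df <- data.frame(color = factor(c("red", "green", "blue", "red")))

# "- 1" drops the intercept so every factor level gets its own 0/1 dummy column
x <- model.matrix(~ color - 1, data = df)
print(x)
```

Dropping the intercept with `- 1` keeps one 0/1 column per factor level instead of treating one level as an implicit baseline.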

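For context, a hedged end-to-end sketch of the documented interface after this change, assuming the package is installed under its CRAN name `gensvm`; the data is random and purely illustrative:

```r
library(gensvm)

set.seed(1)
x <- matrix(rnorm(60), nrow = 20, ncol = 3)                # 20 observations, 3 numeric predictors
y <- factor(sample(c("a", "b", "c"), 20, replace = TRUE))  # class labels

# Arguments are passed positionally: at this commit the code still names the
# first parameter X while the updated docs already call it x.
# Parameter names and defaults follow the \usage{} block in the diff above.
fit <- gensvm(x, y, p = 1.5, lambda = 1e-8, kernel = "linear")

print(fit$n.iter)     # number of iterations performed in training
print(fit$n.support)  # number of support vectors in the final model
```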