Diffstat (limited to 'docs/cls_gensvm.rst')
-rw-r--r--  docs/cls_gensvm.rst  33
1 file changed, 26 insertions(+), 7 deletions(-)
diff --git a/docs/cls_gensvm.rst b/docs/cls_gensvm.rst
index fc19bf4..b4bc9a7 100644
--- a/docs/cls_gensvm.rst
+++ b/docs/cls_gensvm.rst
@@ -1,5 +1,5 @@
-.. py:class:: GenSVM(p=1.0, lmd=1e-05, kappa=0.0, epsilon=1e-06, weights='unit', kernel='linear', gamma='auto', coef=0.0, degree=2.0, kernel_eigen_cutoff=1e-08, verbose=0, random_state=None, max_iter=100000000.0)
+.. py:class:: GenSVM(p=1.0, lmd=1e-05, kappa=0.0, epsilon=1e-06, weights='unit', kernel='linear', gamma='auto', coef=1.0, degree=2.0, kernel_eigen_cutoff=1e-08, verbose=0, random_state=None, max_iter=100000000.0)
:noindex:
:module: gensvm.core
@@ -21,6 +21,10 @@
:type kappa: float, optional (default=0.0)
:param weights: Type of sample weights to use. Options are 'unit' for unit weights and
'group' for group size correction weights (equation 4 in the paper).
+
+ It is also possible to provide an explicit vector of sample weights
+ through the :func:`~GenSVM.fit` method. If so, it will override the
+ setting provided here.
:type weights: string, optional (default='unit')
:param kernel: Specify the kernel type to use in the classifier. It must be one of
'linear', 'poly', 'rbf', or 'sigmoid'.
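
A minimal sketch of the weight handling described in the added note above, assuming the class is importable at the package top level (``from gensvm import GenSVM``) and using made-up data::

    import numpy as np
    from gensvm import GenSVM

    X = np.random.rand(20, 3)
    y = np.arange(20) % 3            # three classes, labels 0/1/2

    # 'group' weights correct for class-size imbalance ...
    clf = GenSVM(weights='group')

    # ... but an explicit vector passed to fit() overrides that setting.
    w = np.ones(X.shape[0])
    clf.fit(X, y, sample_weight=w)
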
@@ -31,7 +35,7 @@
:type gamma: float, optional (default='auto')
:param coef: Kernel parameter for the poly and sigmoid kernel. See `Kernels in
GenSVM <gensvm_kernels_>`_ for the exact implementation of the kernels.
- :type coef: float, optional (default=0.0)
+ :type coef: float, optional (default=1.0)
:param degree: Kernel parameter for the poly kernel. See `Kernels in GenSVM
<gensvm_kernels_>`_ for the exact implementation of the kernels.
:type degree: float, optional (default=2.0)
@@ -42,6 +46,10 @@
:type kernel_eigen_cutoff: float, optional (default=1e-8)
:param verbose: Enable verbose output
:type verbose: int, (default=0)
+ :param random_state: The seed for the random number generation used for initialization where
+ necessary. See the documentation of
+ ``sklearn.utils.check_random_state`` for more info.
+ :type random_state: None, int, instance of RandomState
:param max_iter: The maximum number of iterations to be run.
:type max_iter: int, (default=1e8)
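
The constructor parameters documented above can be combined as in the following sketch (illustrative values only; the top-level import is assumed)::

    from gensvm import GenSVM

    clf = GenSVM(
        kernel='poly',
        coef=1.0,          # new default introduced by this change
        degree=2.0,
        gamma='auto',
        random_state=42,   # seeds the initialization where needed
        max_iter=10**8,
    )
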
@@ -65,6 +73,10 @@
*int* -- The number of support vectors that were found
+ .. attribute:: SVs_
+
+ *array, shape = [n_observations, ]* -- Index vector that marks the support vectors (1 = SV, 0 = no SV)
+
.. seealso::
:class:`.GenSVMGridSearchCV`
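
A short sketch of inspecting the new ``SVs_`` attribute next to the existing ``n_support_`` attribute, again with made-up data and the assumed top-level import::

    import numpy as np
    from gensvm import GenSVM

    X = np.random.rand(30, 4)
    y = np.arange(30) % 3

    clf = GenSVM()
    clf.fit(X, y)
    print(clf.n_support_)            # number of support vectors found
    print(np.flatnonzero(clf.SVs_))  # indices of the support vectors (SVs_ == 1)
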
@@ -75,7 +87,7 @@
- .. py:method:: GenSVM.fit(X, y, seed_V=None)
+ .. py:method:: GenSVM.fit(X, y, sample_weight=None, seed_V=None)
:noindex:
:module: gensvm.core
@@ -88,6 +100,10 @@
:type X: array, shape = (n_observations, n_features)
:param y: The label vector, labels can be numbers or strings.
:type y: array, shape = (n_observations, )
+ :param sample_weight: Array of weights that are assigned to individual samples. If not
+ provided, then the weight specification in the constructor is used
+ ('unit' or 'group').
+ :type sample_weight: array, shape = (n_observations, )
:param seed_V: Seed coefficient array to use as a warm start for the optimization.
It can for instance be the :attr:`combined_coef_
<.GenSVM.combined_coef_>` attribute of a different GenSVM model.
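
A sketch of the extended ``fit`` signature: explicit per-sample weights together with a warm start from the ``combined_coef_`` of an earlier model (illustrative data; top-level import assumed)::

    import numpy as np
    from gensvm import GenSVM

    X = np.random.rand(50, 5)
    y = np.arange(50) % 4
    w = np.ones(50)
    w[y == 0] = 2.0                  # upweight class 0

    base = GenSVM(epsilon=1e-4)
    base.fit(X, y)                   # coarse fit

    clf = GenSVM(epsilon=1e-8)
    clf.fit(X, y, sample_weight=w, seed_V=base.combined_coef_)
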
@@ -106,15 +122,18 @@
:rtype: object
- .. py:method:: GenSVM.predict(X)
+ .. py:method:: GenSVM.predict(X, trainX=None)
:noindex:
:module: gensvm.core
Predict the class labels on the given data
- :param X:
- :type X: array, shape = [n_samples, n_features]
+ :param X: Data for which to predict the labels
+ :type X: array, shape = [n_test_samples, n_features]
+ :param trainX: Only for nonlinear prediction with kernels: the training data used
+ to train the model.
+ :type trainX: array, shape = [n_train_samples, n_features]
- :returns: **y_pred**
+ :returns: **y_pred** -- Predicted class labels of the data in X.
:rtype: array, shape = (n_samples, )
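
Finally, a sketch of nonlinear prediction as documented above: when a kernel other than 'linear' is used, the training data is passed to ``predict`` through ``trainX`` (illustrative data; top-level import assumed)::

    import numpy as np
    from gensvm import GenSVM

    X_train = np.random.rand(40, 3)
    y_train = np.arange(40) % 3
    X_test = np.random.rand(10, 3)

    clf = GenSVM(kernel='rbf')
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test, trainX=X_train)  # training data needed for nonlinear kernels
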