NuSVC: Nu-Support Vector Classification#

  • Es una reparametrización matemáticamente equivalente de la C-SVC.

  • El parámetro $\nu$ reemplaza a $C$ y controla el número de vectores de soporte.

[1]:
import numpy as np

# Toy training set used throughout the notebook:
# four 2-D points, two per class (labels 1 and 2).
X = np.array([
    [-1, -1],
    [-2, -1],
    [ 1,  1],
    [ 2,  1],
])
y = np.array([1, 1, 2, 2])
[2]:
from sklearn.svm import NuSVC

# Build a NuSVC classifier with each constructor argument spelled out and
# documented, fit it on the toy dataset, and predict on the training points.
nuSVC = NuSVC(
    # ----------------------------------------------------------------------------
    # An upper bound on the fraction of margin errors (see User Guide) and a
    # lower bound of the fraction of support vectors. Should be in the interval
    # (0, 1].
    nu=0.5,
    # ----------------------------------------------------------------------------
    # Specifies the kernel type to be used in the algorithm. If none is given,
    # ‘rbf’ will be used.
    # * 'linear'
    # * 'poly'
    # * 'rbf'
    # * 'sigmoid'
    kernel="rbf",
    # ----------------------------------------------------------------------------
    # Degree of the polynomial kernel function (‘poly’). Must be non-negative.
    # Ignored by all other kernels.
    degree=3,
    # ----------------------------------------------------------------------------
    # Kernel coefficient for ‘rbf’, ‘poly’ and ‘sigmoid’.
    # * if gamma='scale' (default) is passed then it uses
    #   1 / (n_features * X.var()) as value of gamma,
    # * if ‘auto’, uses 1 / n_features
    # * if float, must be non-negative.
    gamma="scale",
    # ----------------------------------------------------------------------------
    # Independent term in kernel function. It is only significant in ‘poly’ and
    # ‘sigmoid’.
    coef0=0.0,
    # ----------------------------------------------------------------------------
    # Whether to enable probability estimates. This must be enabled prior to
    # calling fit, will slow down that method as it internally uses 5-fold
    # cross-validation, and predict_proba may be inconsistent with predict.
    probability=False,
    # ----------------------------------------------------------------------------
    # Tolerance for stopping criterion.
    tol=1e-3,
    # ----------------------------------------------------------------------------
    # Set the parameter C of class i to class_weight[i]*C for SVC. If not given,
    #  all classes are supposed to have weight one. The “balanced” mode uses the
    # values of y to automatically adjust weights inversely proportional to
    # class frequencies as n_samples / (n_classes * np.bincount(y)).
    class_weight=None,
    # ----------------------------------------------------------------------------
    # Hard limit on iterations within solver, or -1 for no limit.
    max_iter=-1,
    # ----------------------------------------------------------------------------
    # Whether to return a one-vs-rest (‘ovr’) decision function of shape
    # (n_samples, n_classes) as all other classifiers, or the original
    # one-vs-one (‘ovo’) decision function of libsvm which has shape
    # (n_samples, n_classes * (n_classes - 1) / 2). However, one-vs-one (‘ovo’)
    # is always used as multi-class strategy. The parameter is ignored for
    # binary classification.
    decision_function_shape="ovr",
    # ----------------------------------------------------------------------------
    # Controls the pseudo random number generation for shuffling the data for
    # probability estimates. Ignored when probability is False. Pass an int for
    # reproducible output across multiple function calls.
    random_state=None,
)

# Fit on the four training points and predict on them; the output below shows
# the model reproduces the training labels exactly.
nuSVC.fit(X, y)
nuSVC.predict(X)
[2]:
array([1, 1, 2, 2])
[3]:
nuSVC.dual_coef_
[3]:
array([[-0.73537894, -0.46684045,  0.73597922,  0.46624018]])
[4]:
nuSVC.intercept_
[4]:
array([-1.47673101e-05])
[5]:
nuSVC.support_
[5]:
array([0, 1, 2, 3], dtype=int32)
[6]:
nuSVC.support_vectors_
[6]:
array([[-1., -1.],
       [-2., -1.],
       [ 1.,  1.],
       [ 2.,  1.]])
[7]:
# Number of support vectors for each class (here, 2 per class).
nuSVC.n_support_
[7]:
array([2, 2], dtype=int32)