diff --git a/onnxruntime/core/providers/cpu/ml/ml_common.h b/onnxruntime/core/providers/cpu/ml/ml_common.h
index 4154982405c24..9ebae677870a0 100644
--- a/onnxruntime/core/providers/cpu/ml/ml_common.h
+++ b/onnxruntime/core/providers/cpu/ml/ml_common.h
@@ -281,40 +281,50 @@ static inline void compute_softmax_zero(std::vector<float>& values) {
 }
 
 static inline void write_scores(std::vector<float>& scores, POST_EVAL_TRANSFORM post_transform, int64_t write_index, Tensor* Z, int add_second_class) {
-  if (post_transform == POST_EVAL_TRANSFORM::PROBIT && scores.size() == 1) {
-    scores[0] = ml_sqrt2 * ml_inv_erf(2 * scores[0] - 1);
-    Z->template MutableData<float>()[write_index] = scores[0];
-  } else if (scores.size() >= 2) {  //multiclass
-    if (post_transform == POST_EVAL_TRANSFORM::LOGISTIC) {
-      for (float& score : scores) {
-        score = ml_logit(score);
-      }
-    } else if (post_transform == POST_EVAL_TRANSFORM::SOFTMAX) {
-      compute_softmax(scores);
-    } else if (post_transform == POST_EVAL_TRANSFORM::SOFTMAX_ZERO) {
-      compute_softmax_zero(scores);
+  if (scores.size() >= 2) {  //multiclass
+    switch ((unsigned char)post_transform) {
+      case (unsigned char)POST_EVAL_TRANSFORM::LOGISTIC:
+        for (float& score : scores)
+          score = ml_logit(score);
+        break;
+      case (unsigned char)POST_EVAL_TRANSFORM::SOFTMAX:
+        compute_softmax(scores);
+        break;
+      case (unsigned char)POST_EVAL_TRANSFORM::SOFTMAX_ZERO:
+        compute_softmax_zero(scores);
+        break;
     }
-  } else {  //binary case
-    if (add_second_class == 0 && scores.size() == 1) {  //0 = all positive weights, winning class is positive
-      scores.push_back(scores[0]);
-      scores[0] = 1.f - scores[0];  //put opposite score in positive slot
-    } else if (add_second_class == 1 && scores.size() == 1) {  //1 = all positive weights, winning class is negative
-      scores.push_back(scores[0]);
-      scores[0] = 1.f - scores[0];  //put opposite score in positive slot
-    } else if (add_second_class == 2 && scores.size() == 1) {  //2 = mixed weights, winning class is positive
-      if (post_transform == POST_EVAL_TRANSFORM::LOGISTIC) {
-        scores.push_back(ml_logit(scores[0]));  //ml_logit(scores[k]);
-        scores[0] = ml_logit(-scores[0]);
-      } else {
-        scores.push_back(scores[0]);
-        scores[0] = -scores[0];
-      }
-    } else if (add_second_class == 3 && scores.size() == 1) {  //3 = mixed weights, winning class is negative
-      if (post_transform == POST_EVAL_TRANSFORM::LOGISTIC) {
-        scores.push_back(ml_logit(scores[0]));  //ml_logit(scores[k]);
-        scores[0] = ml_logit(-scores[0]);
-      } else {
-        scores.push_back(-scores[0]);
+  } else if (scores.size() == 1) {  //binary case
+    if (post_transform == POST_EVAL_TRANSFORM::PROBIT) {
+      scores[0] = ml_sqrt2 * ml_inv_erf(2 * scores[0] - 1);
+      //Z->template MutableData<float>()[write_index] = scores[0];
+    } else {
+      switch (add_second_class) {
+        case 0:  //0 = all positive weights, winning class is positive
+          scores.push_back(scores[0]);
+          scores[0] = 1.f - scores[0];  //put opposite score in positive slot
+          break;
+        case 1:  //1 = all positive weights, winning class is negative
+          scores.push_back(scores[0]);
+          scores[0] = 1.f - scores[0];  //put opposite score in positive slot
+          break;
+        case 2:  //2 = mixed weights, winning class is positive
+          if (post_transform == POST_EVAL_TRANSFORM::LOGISTIC) {
+            scores.push_back(ml_logit(scores[0]));  //ml_logit(scores[k]);
+            scores[0] = ml_logit(-scores[0]);
+          } else {
+            scores.push_back(scores[0]);
+            scores[0] = -scores[0];
+          }
+          break;
+        case 3:  //3 = mixed weights, winning class is negative
+          if (post_transform == POST_EVAL_TRANSFORM::LOGISTIC) {
+            scores.push_back(ml_logit(scores[0]));  //ml_logit(scores[k]);
+            scores[0] = ml_logit(-scores[0]);
+          } else {
+            scores.push_back(-scores[0]);
+          }
+          break;
       }
     }
   }
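The binary branch above only ever sees a single score s; the switch fills in a second column so that downstream code can treat binary and multiclass outputs uniformly. A minimal standalone sketch of that expansion, assuming ml_logit (despite its name) computes the logistic sigmoid, as the LOGISTIC post-transform implies; the names here are illustrative, not the onnxruntime API:

    // Sketch: expand one binary score s into {negative, positive} columns,
    // mirroring the add_second_class cases of write_scores above.
    #include <cmath>
    #include <cstdio>
    #include <vector>

    static float sigmoid(float v) { return 1.f / (1.f + std::exp(-v)); }  // stand-in for ml_logit

    static std::vector<float> expand_binary_score(float s, int add_second_class, bool logistic) {
      switch (add_second_class) {
        case 0:  // all positive weights, winning class is positive
        case 1:  // all positive weights, winning class is negative
          return {1.f - s, s};
        case 2:  // mixed weights, winning class is positive
          return logistic ? std::vector<float>{sigmoid(-s), sigmoid(s)}
                          : std::vector<float>{-s, s};
        case 3:  // mixed weights, winning class is negative
          return logistic ? std::vector<float>{sigmoid(-s), sigmoid(s)}
                          : std::vector<float>{s, -s};
        default:
          return {s};
      }
    }

    int main() {
      std::vector<float> r = expand_binary_score(0.8f, 0, false);
      std::printf("%g %g\n", r[0], r[1]);  // prints: 0.2 0.8
    }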
diff --git a/onnxruntime/core/providers/cpu/ml/svmclassifier.cc b/onnxruntime/core/providers/cpu/ml/svmclassifier.cc
index a740d989ae078..4cd3cd2aeea56 100644
--- a/onnxruntime/core/providers/cpu/ml/svmclassifier.cc
+++ b/onnxruntime/core/providers/cpu/ml/svmclassifier.cc
@@ -36,7 +36,7 @@ SVMClassifier<T>::SVMClassifier(const OpKernelInfo& info)
 
   // one of these should be valid
   ONNXRUNTIME_ENFORCE(info.GetAttrs<std::string>("classlabels_strings", classlabels_strings_).IsOK() ||
-                 info.GetAttrs<int64_t>("classlabels_ints", classlabels_ints_).IsOK());
+                      info.GetAttrs<int64_t>("classlabels_ints", classlabels_ints_).IsOK());
 
   vector_count_ = 0;
   feature_count_ = 0;
@@ -75,6 +75,25 @@ SVMClassifier<T>::SVMClassifier(const OpKernelInfo& info)
   }
 }
 
+#define SETSCORESVM(typlabels, classlabels, posclass, negclass)                      \
+  if (classlabels.size() == 2) {                                                     \
+    write_additional_scores = post_transform_ == POST_EVAL_TRANSFORM::NONE ? 2 : 0;  \
+    if (proba_.size() == 0) {                                                        \
+      if (weights_are_all_positive_ && maxweight >= 0.5)                             \
+        Y->template MutableData<typlabels>()[n] = classlabels[1];                    \
+      else if (maxweight > 0 && !weights_are_all_positive_)                          \
+        Y->template MutableData<typlabels>()[n] = classlabels[1];                    \
+      else                                                                           \
+        Y->template MutableData<typlabels>()[n] = classlabels[maxclass];             \
+    } else {                                                                         \
+      Y->template MutableData<typlabels>()[n] = classlabels[maxclass];               \
+    }                                                                                \
+  } else if (maxweight > 0) {                                                        \
+    Y->template MutableData<typlabels>()[n] = posclass;                              \
+  } else {                                                                           \
+    Y->template MutableData<typlabels>()[n] = negclass;                              \
+  }
+
 template <typename T>
 Status SVMClassifier<T>::Compute(OpKernelContext* ctx) const {
   const Tensor* X = ctx->Input<Tensor>(0);
@@ -83,14 +102,13 @@ Status SVMClassifier<T>::Compute(OpKernelContext* ctx) const {
   int64_t N = X->Shape().NumDimensions() == 1 ? 1 : X->Shape()[0];
 
   Tensor* Y = ctx->Output(0, TensorShape({N}));
-  Tensor* Z;
 
-  std::vector<int64_t> dims;
-  if (mode_ == SVM_TYPE::SVM_SVC && proba_.size() == 0)
-    dims = {static_cast<int64_t>(N), static_cast<int64_t>(class_count_ * (class_count_ - 1) / 2)};
-  else
-    dims = {static_cast<int64_t>(N), static_cast<int64_t>(class_count_)};
-  Z = ctx->Output(1, TensorShape(dims));
+  int64_t nc = (proba_.size() > 0 || vector_count_ == 0)
+                   ? class_count_
+                   : (class_count_ > 2 ? class_count_ * (class_count_ - 1) / 2 : 2);
+  std::vector<int64_t> dims = {static_cast<int64_t>(N), static_cast<int64_t>(nc)};
+  Tensor* Z = ctx->Output(1, TensorShape(dims));
+
   const auto* x_data = X->template Data<T>();
   int64_t zindex = 0;
@@ -99,24 +117,34 @@ Status SVMClassifier<T>::Compute(OpKernelContext* ctx) const {
   {
     int64_t current_weight_0 = n * stride;
     int64_t maxclass = -1;
-    double maxweight = 0.f;
     std::vector<float> decisions;
     std::vector<float> scores;
     std::vector<float> kernels;
     std::vector<int64_t> votes;
+    float sum;
+
+    if (vector_count_ == 0 && mode_ == SVM_TYPE::SVM_LINEAR) {
+      // This was in the original code, but it does not appear in libsvm or scikit-learn.
+      for (int64_t j = 0; j < class_count_; j++) {  //for each class
+        float val = kernel_dot(x_data, current_weight_0, coefficients_, feature_count_ * j,
+                               feature_count_, get_kernel_type());
+        val += rho_[0];
+        scores.push_back(val);
+      }
+    } else {
+      if (vector_count_ == 0)
+        return Status(common::ONNXRUNTIME, common::FAIL, "No support vectors.");
+      int evals = 0;
 
-    if (mode_ == SVM_TYPE::SVM_SVC) {
       for (int64_t j = 0; j < vector_count_; j++) {
-        float val = kernel_dot(x_data, current_weight_0, support_vectors_, feature_count_ * j, feature_count_, get_kernel_type());
+        float val = kernel_dot(x_data, current_weight_0, support_vectors_, feature_count_ * j,
+                               feature_count_, get_kernel_type());
         kernels.push_back(val);
       }
-      for (int64_t j = 0; j < class_count_; j++) {
-        votes.push_back(0);
-      }
-      int evals = 0;
-      for (int64_t i = 0; i < class_count_; i++) {      //for each class
-        for (int64_t j = i + 1; j < class_count_; j++) {  //for each class
-          float sum = 0;
+      votes.resize(class_count_, 0);
+      for (int64_t i = 0; i < class_count_; i++) {        // for each class
+        for (int64_t j = i + 1; j < class_count_; j++) {  // for each class
+          sum = 0;
           int64_t start_index_i = starting_vector_[i];  // *feature_count_;
           int64_t start_index_j = starting_vector_[j];  // *feature_count_;
@@ -125,120 +153,107 @@ Status SVMClassifier<T>::Compute(OpKernelContext* ctx) const {
           int64_t pos1 = (vector_count_) * (j - 1);
           int64_t pos2 = (vector_count_) * (i);
 
-          for (int64_t m = 0; m < class_i_support_count; m++) {
-            float val1 = coefficients_[pos1 + start_index_i + m];
-            float val2 = kernels[start_index_i + m];
-            sum += val1 * val2;
-          }
-          for (int64_t m = 0; m < class_j_support_count; m++) {
-            float val1 = coefficients_[pos2 + start_index_j + m];
-            float val2 = kernels[start_index_j + m];
-            sum += val1 * val2;
-          }
+          const float* val1 = &(coefficients_[pos1 + start_index_i]);
+          const float* val2 = &(kernels[start_index_i]);
+          for (int64_t m = 0; m < class_i_support_count; ++m, ++val1, ++val2)
+            sum += *val1 * *val2;
+
+          val1 = &(coefficients_[pos2 + start_index_j]);
+          val2 = &(kernels[start_index_j]);
+          for (int64_t m = 0; m < class_j_support_count; ++m, ++val1, ++val2)
+            sum += *val1 * *val2;
 
           sum += rho_[evals];
           scores.push_back(sum);
-          if (sum > 0) {
-            votes[i]++;
-          } else {
-            votes[j]++;
-          }
-          evals++;  //index into rho
+          ++(votes[sum > 0 ? i : j]);
+          ++evals;  //index into rho
         }
       }
-    } else if (mode_ == SVM_TYPE::SVM_LINEAR) {  //liblinear
-      for (int64_t j = 0; j < class_count_; j++) {  //for each class
-        float val = kernel_dot(x_data, current_weight_0, coefficients_, feature_count_ * j, feature_count_, get_kernel_type());
-        val += rho_[0];
-        scores.push_back(val);
-      }
     }
+
     if (proba_.size() > 0 && mode_ == SVM_TYPE::SVM_SVC) {
       //compute probabilities from the scores
-      std::vector<float> estimates;
-      std::vector<float> probsp2;
       int64_t num = class_count_ * class_count_;
-      for (int64_t m = 0; m < num; m++) {
-        probsp2.push_back(0.f);  //min prob
-      }
-      for (int64_t m = 0; m < class_count_; m++) {
-        estimates.push_back(0.f);  //min prob
-      }
+      std::vector<float> probsp2(num, 0.f);
+      std::vector<float> estimates(class_count_, 0.f);
       int64_t index = 0;
-      for (int64_t i = 0; i < class_count_; i++) {
-        for (int64_t j = i + 1; j < class_count_; j++) {
+      int64_t p1, p2;
+      for (int64_t i = 0; i < class_count_; ++i) {
+        p1 = i * class_count_ + i + 1;
+        p2 = (i + 1) * class_count_ + i;
+        for (int64_t j = i + 1; j < class_count_; ++j, ++index) {
           float val1 = sigmoid_probability(scores[index], proba_[index], probb_[index]);
           float val2 = std::max(val1, 1.0e-7f);
-          probsp2[i * class_count_ + j] = std::min(val2, 1 - 1.0e-7f);
-          probsp2[j * class_count_ + i] = 1 - probsp2[i * class_count_ + j];
-          index++;
+          val2 = std::min(val2, 1 - 1.0e-7f);
+          probsp2[p1] = val2;
+          probsp2[p2] = 1 - val2;
+          ++p1;
+          p2 += class_count_;
         }
       }
       multiclass_probability(class_count_, probsp2, estimates);
       //copy probabilities back into scores
       scores.resize(estimates.size());
-      for (int64_t k = 0; k < static_cast<int64_t>(estimates.size()); k++) {
-        scores[k] = estimates[k];
-      }
+      std::copy(estimates.begin(), estimates.end(), scores.begin());
+#if false
+      // OVR normalization as implemented in scikit-learn.
+    } else if (proba_.size() == 0) {
+      // Applies the first part of _ovr_decision_function (scikit-learn).
+      // The ONNX spec imposes one column per class; libsvm does not do this, scikit-learn does.
+      // If OVR_NORM is defined, the function also applies the normalization that
+      // scikit-learn performs in _ovr_decision_function.
+      // This method has a major drawback: the scores depend on the other observations,
+      // because of a rescaling based on a maximum computed over all predictions
+      // (observations are not independent).
+      /*
+      for i in range(n_classes):
+          for j in range(i + 1, n_classes):
+              sum_of_confidences[:, i] -= confidences[:, k]
+              sum_of_confidences[:, j] += confidences[:, k]
+              k += 1
+
+      max_confidences = sum_of_confidences.max()
+      min_confidences = sum_of_confidences.min()
+
+      if max_confidences == min_confidences:
+          return votes
+
+      eps = np.finfo(sum_of_confidences.dtype).eps
+      max_abs_confidence = max(abs(max_confidences), abs(min_confidences))
+      scale = (0.5 - eps) / max_abs_confidence
+      return votes + sum_of_confidences * scale
+      */
+      std::vector<float> conf(class_count_, 0.f);
+      float* ps = &(scores[0]);
+      for (int64_t i = 0; i < class_count_; ++i) {
+        for (int64_t j = i + 1; j < class_count_; ++j, ++ps) {
+          conf[i] += *ps;
+          conf[j] -= *ps;
+        }
       }
+
+      scores = conf;
+#endif
     }
-    int64_t maxvotes = 0;
+
+    double maxweight = 0;
     if (votes.size() > 0) {
-      for (int64_t k = 0; k < static_cast<int64_t>(votes.size()); k++) {
-        if (votes[k] > maxvotes) {
-          maxvotes = votes[k];
-          maxclass = k;
-        }
-      }
+      auto it_maxvotes = std::max_element(votes.begin(), votes.end());
+      maxclass = std::distance(votes.begin(), it_maxvotes);
     } else {
-      for (int64_t k = 0; k < static_cast<int64_t>(scores.size()); k++) {
-        if (scores[k] > maxweight) {
-          maxclass = k;
-          maxweight = scores[k];
-        }
-      }
+      auto it_maxweight = std::max_element(scores.begin(), scores.end());
+      maxclass = std::distance(scores.begin(), it_maxweight);
      maxweight = *it_maxweight;
     }
-    //write top class
+
+    // write top class
+    // the ONNX spec expects one column per class
     int write_additional_scores = -1;
-    if (rho_.size() == 1)  //binary
-    {
+    if (rho_.size() == 1) {
       if (using_strings_) {
-        if (classlabels_strings_.size() == 2 && weights_are_all_positive_ && maxweight >= 0.5 && proba_.size() == 0) {
-          Y->template MutableData<std::string>()[n] = classlabels_strings_[1];  //positive label
-          write_additional_scores = 0;
-        } else if (classlabels_strings_.size() == 2 && maxweight > 0 && !weights_are_all_positive_ && proba_.size() == 0) {
-          Y->template MutableData<std::string>()[n] = classlabels_strings_[1];  //positive label
-          write_additional_scores = 0;
-        } else if (classlabels_strings_.size() == 2 && proba_.size() > 0) {  //this case all classes are in their rightful spot
-          Y->template MutableData<std::string>()[n] = classlabels_strings_[maxclass];  //whichever label
-          write_additional_scores = -1;
-        } else if (classlabels_strings_.size() == 2) {
-          Y->template MutableData<std::string>()[n] = classlabels_strings_[0];  //negative label
-          write_additional_scores = 1;
-        } else if (maxweight > 0) {
-          Y->template MutableData<std::string>()[n] = "1";  //positive label
-        } else {
-          Y->template MutableData<std::string>()[n] = "0";  //negative label
-        }
-      } else  //no strings
-      {
-        if (classlabels_ints_.size() == 2 && weights_are_all_positive_ && maxweight >= 0.5 && proba_.size() == 0) {
-          Y->template MutableData<int64_t>()[n] = classlabels_ints_[1];  //positive label
-          write_additional_scores = 0;
-        } else if (classlabels_ints_.size() == 2 && maxweight > 0 && !weights_are_all_positive_ && proba_.size() == 0) {
-          Y->template MutableData<int64_t>()[n] = classlabels_ints_[0];  //pos label
-          write_additional_scores = 0;
-        } else if (classlabels_ints_.size() == 2 && proba_.size() > 0) {  //this case all classes are in their rightful spot
-          Y->template MutableData<int64_t>()[n] = classlabels_ints_[maxclass];  //whichever label
-          write_additional_scores = -1;
-        } else if (classlabels_ints_.size() == 2) {
-          Y->template MutableData<int64_t>()[n] = classlabels_ints_[0];  //negative label
-          write_additional_scores = 1;
-        } else if (maxweight > 0) {
-          Y->template MutableData<int64_t>()[n] = 1;  //positive label
-        } else {
-          Y->template MutableData<int64_t>()[n] = 0;  //negative label
-        }
+        SETSCORESVM(std::string, classlabels_strings_, "1", "0")
+      } else {
+        SETSCORESVM(int64_t, classlabels_ints_, 1, 0)
       }
     } else {  //multiclass
       if (using_strings_) {
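The probability path above relies on two helpers whose bodies are not part of this diff: sigmoid_probability, which maps each pairwise decision value through a fitted sigmoid, and multiclass_probability, which couples the clipped pairwise estimates into per-class probabilities (the pairwise-coupling scheme libsvm uses, due to Wu, Lin and Weng). Assuming sigmoid_probability follows libsvm's sigmoid_predict (the (A, B) parameter order here is an assumption, not the onnxruntime signature), the sigmoid step looks like:

    // Sketch of libsvm-style Platt scaling: P(y=1|f) = 1 / (1 + exp(A*f + B)),
    // evaluated so that exp() never overflows for large |A*f + B|.
    #include <cmath>

    static double platt_sigmoid(double f, double A, double B) {  // hypothetical signature
      double fApB = f * A + B;
      if (fApB >= 0)
        return std::exp(-fApB) / (1.0 + std::exp(-fApB));
      return 1.0 / (1.0 + std::exp(fApB));
    }

The p1/p2 arithmetic in the loop fills the upper triangle of the class_count_ x class_count_ matrix row by row (p1) and mirrors 1 - value into the lower triangle column by column (p2, stepping by class_count_), which matches the removed i * class_count_ + j and j * class_count_ + i indexing.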
diff --git a/onnxruntime/core/providers/cpu/ml/svmclassifier.h b/onnxruntime/core/providers/cpu/ml/svmclassifier.h
index f51a6be5d75f0..f3224dfd2429d 100644
--- a/onnxruntime/core/providers/cpu/ml/svmclassifier.h
+++ b/onnxruntime/core/providers/cpu/ml/svmclassifier.h
@@ -31,28 +31,28 @@ class SVMCommon {
   float kernel_dot(const T* A, int64_t a, const std::vector<float>& B, int64_t b, int64_t len, KERNEL k) const {
     float sum = 0.f;
+    const T* pA = &A[a];
+    const float* pB = &B[b];
     if (k == KERNEL::POLY) {
-      for (int64_t i = 0; i < len; i++) {
-        sum += B[b + i] * static_cast<float>(A[a + i]);
-      }
+      for (int64_t i = len; i > 0; --i, ++pA, ++pB)
+        sum += *pB * static_cast<float>(*pA);
       sum = gamma_ * sum + coef0_;
       sum = std::pow(sum, degree_);
     } else if (k == KERNEL::SIGMOID) {
-      for (int64_t i = 0; i < len; i++) {
-        sum += B[b + i] * static_cast<float>(A[a + i]);
-      }
+      for (int64_t i = len; i > 0; --i, ++pA, ++pB)
+        sum += *pB * static_cast<float>(*pA);
       sum = gamma_ * sum + coef0_;
       sum = std::tanh(sum);
     } else if (k == KERNEL::RBF) {
-      for (int64_t i = 0; i < len; i++) {
-        float val = static_cast<float>(A[a + i]) - B[b + i];
-        sum += (val * val);
+      float val;
+      for (int64_t i = len; i > 0; --i, ++pA, ++pB) {
+        val = static_cast<float>(*pA) - *pB;
+        sum += val * val;
       }
       sum = std::exp(-gamma_ * sum);
     } else if (k == KERNEL::LINEAR) {
-      for (int64_t i = 0; i < len; i++) {
-        sum += B[b + i] * static_cast<float>(A[a + i]);
-      }
+      for (int64_t i = len; i > 0; --i, ++pA, ++pB)
+        sum += *pB * static_cast<float>(*pA);
     }
     return sum;
   }
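For reference, kernel_dot computes, over the slices A[a .. a+len) and B[b .. b+len): POLY = pow(gamma * <A,B> + coef0, degree), SIGMOID = tanh(gamma * <A,B> + coef0), RBF = exp(-gamma * ||A - B||^2), and LINEAR = <A,B>. An illustrative restatement of the RBF case in plain indexed form (not the onnxruntime implementation):

    #include <cmath>
    #include <cstdint>

    // RBF kernel between two length-len float slices: exp(-gamma * squared distance).
    static float rbf_kernel(const float* a, const float* b, int64_t len, float gamma) {
      float sum = 0.f;
      for (int64_t i = 0; i < len; ++i) {
        float d = a[i] - b[i];
        sum += d * d;  // accumulate squared Euclidean distance
      }
      return std::exp(-gamma * sum);
    }

The pointer-walking loops in the diff compute the same sums; they only hoist the a + i and b + i index arithmetic out of the loop body.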
diff --git a/onnxruntime/test/providers/cpu/ml/svmclassifier_test.cc b/onnxruntime/test/providers/cpu/ml/svmclassifier_test.cc
index 9b75ebe5616b2..36c130dafe890 100644
--- a/onnxruntime/test/providers/cpu/ml/svmclassifier_test.cc
+++ b/onnxruntime/test/providers/cpu/ml/svmclassifier_test.cc
@@ -10,14 +10,20 @@ namespace test {
 TEST(MLOpTest, SVMClassifierMulticlassSVC) {
   OpTester test("SVMClassifier", 1, onnxruntime::kMLDomain);
 
-  std::vector<float> dual_coefficients = {1.14360327f, 1.95968249f, -1.175683f, -1.92760275f, -1.32575698f, -1.32575698f, 0.66332785f, 0.66242913f, 0.53120854f, 0.53510444f, -1.06631298f, -1.06631298f, 0.66332785f, 0.66242913f, 0.53120854f, 0.53510444f, 1.f, -1.f};
-  std::vector<float> support_vectors = {0.f, 0.5f, 32.f, 2.f, 2.9f, -32.f, 1.f, 1.5f, 1.f, 3.f, 13.3f, -11.f, 12.f, 12.9f, -312.f, 43.f, 413.3f, -114.f};
+  std::vector<float> dual_coefficients = {1.14360327f, 1.95968249f, -1.175683f, -1.92760275f, -1.32575698f,
+                                          -1.32575698f, 0.66332785f, 0.66242913f, 0.53120854f, 0.53510444f,
+                                          -1.06631298f, -1.06631298f, 0.66332785f, 0.66242913f, 0.53120854f,
+                                          0.53510444f, 1.f, -1.f};
+  std::vector<float> support_vectors = {0.f, 0.5f, 32.f, 2.f, 2.9f, -32.f, 1.f, 1.5f, 1.f, 3.f,
+                                        13.3f, -11.f, 12.f, 12.9f, -312.f, 43.f, 413.3f, -114.f};
   std::vector<int64_t> classes = {0, 1, 2, 3};
   std::vector<int64_t> vectors_per_class = {2, 2, 1, 1};
   std::vector<float> rho = {0.5279583f, 0.32605162f, 0.32605162f, 0.06663721f, 0.06663721f, 0.f};
   std::vector<float> kernel_params = {0.001f, 0.f, 3.f};  //gamma, coef0, degree
 
-  std::vector<float> X = {1.f, 0.0f, 0.4f, 3.0f, 44.0f, -3.f, 12.0f, 12.9f, -312.f, 23.0f, 11.3f, -222.f, 23.0f, 11.3f, -222.f, 23.0f, 3311.3f, -222.f, 23.0f, 11.3f, -222.f, 43.0f, 413.3f, -114.f};
+  std::vector<float> X = {1.f, 0.0f, 0.4f, 3.0f, 44.0f, -3.f, 12.0f, 12.9f, -312.f, 23.0f,
+                          11.3f, -222.f, 23.0f, 11.3f, -222.f, 23.0f, 3311.3f, -222.f, 23.0f,
+                          11.3f, -222.f, 43.0f, 413.3f, -114.f};
   std::vector<int64_t> predictions = {1, 1, 2, 0, 0, 0, 0, 3};
   std::vector<float> scores = {
       -0.956958294f, 0.799815655f, 0.799815655f, 0.988598406f, 0.988598406f, 0,
@@ -47,22 +53,28 @@ TEST(MLOpTest, SVMClassifierMulticlassSVC) {
 TEST(MLOpTest, SVMClassifierMulticlassLinearSVC) {
   OpTester test("SVMClassifier", 1, onnxruntime::kMLDomain);
 
-  std::vector<float> dual_coefficients = {-1.55181212e-01f, 2.42698956e-01f, 7.01893432e-03f, 4.07614474e-01f, -3.24927823e-02f, 2.79897536e-04f, -1.95771302e-01f, -3.52437368e-01f, -2.15973096e-02f, -4.38190277e-01f, 4.56869105e-02f, -1.29375499e-02f};
+  std::vector<float> dual_coefficients = {-1.55181212e-01f, 2.42698956e-01f, 7.01893432e-03f,
+                                          4.07614474e-01f, -3.24927823e-02f, 2.79897536e-04f,
+                                          -1.95771302e-01f, -3.52437368e-01f, -2.15973096e-02f,
+                                          -4.38190277e-01f, 4.56869105e-02f, -1.29375499e-02f};
   std::vector<int64_t> classes = {0, 1, 2, 3};
   std::vector<float> rho = {-0.07489691f, -0.1764396f, -0.21167431f, -0.51619097f};
   std::vector<float> kernel_params = {0.001f, 0.f, 3.f};  //gamma, coef0, degree
 
-  std::vector<float> X = {1.f, 0.0f, 0.4f, 3.0f, 44.0f, -3.f, 12.0f, 12.9f, -312.f, 23.0f, 11.3f, -222.f, 23.0f, 11.3f, -222.f, 23.0f, 3311.3f, -222.f, 23.0f, 11.3f, -222.f, 43.0f, 413.3f, -114.f};
+  std::vector<float> X = {1.f, 0.0f, 0.4f, 3.0f, 44.0f, -3.f,
+                          12.0f, 12.9f, -312.f, 23.0f, 11.3f, -222.f,
+                          23.0f, 11.3f, -222.f, 23.0f, 3311.3f, -222.f,
+                          23.0f, 11.3f, -222.f, 43.0f, 413.3f, -114.f};
   std::vector<int64_t> predictions = {1, 0, 1, 1, 1, 0, 1, 0};
   std::vector<float> scores = {
-      -0.227270544f, 0.332829535f, -0.279307127f, -0.518262208f,
-      10.1172562f, -0.282575697f, -16.1046638f, 0.659568906f,
-      -0.996162534f, 4.30999184f, -0.232234091f, -0.707304120f,
-      -2.45976996f, 8.87092972f, -3.76557732f, -6.76487541f,
-      -2.45976996f, 8.87092972f, -3.76557732f, -6.76487541f,
-      798.446777f, -98.3552551f, -1166.80896f, 144.001923f,
-      -2.45976996f, 8.87092972f, -3.76557732f, -6.76487541f,
-      92.7596283f, 3.99134970f, -151.693329f, 1.44020212f};
+      -0.227271f, 0.33283f, -0.279307f, -0.518262f,
+      10.1173f, -0.282576f, -16.1047f, 0.659569f,
+      -0.996163f, 4.30999f, -0.232234f, -0.707304f,
+      -2.45977f, 8.87093f, -3.76558f, -6.76488f,
+      -2.45977f, 8.87093f, -3.76558f, -6.76488f,
+      798.447f, -98.3553f, -1166.8089599609375f, 144.002f,
+      -2.45977f, 8.87093f, -3.76558f, -6.76488f,
+      92.7596f, 3.99135f, -151.693f, 1.4402f};
 
   test.AddAttribute("kernel_type", std::string("RBF"));
   test.AddAttribute("coefficients", dual_coefficients);
@@ -115,5 +127,47 @@ TEST(MLOpTest, SVMClassifierSVCProbabilities) {
   test.Run();
 }
 
+TEST(MLOpTest, SVMClassifierSVC) {
+  OpTester test("SVMClassifier", 1, onnxruntime::kMLDomain);
+
+  std::vector<float> coefficients = {1.14360327f, 1.95968249f, -1.175683f, -1.92760275f, -1.32575698f, -1.32575698f,
+                                     0.66332785f, 0.66242913f, 0.53120854f, 0.53510444f, -1.06631298f, -1.06631298f,
+                                     0.66332785f, 0.66242913f, 0.53120854f, 0.53510444f, 1.f, -1.f};
+  std::vector<float> support_vectors = {0.f, 0.5f, 32.f, 2.f, 2.9f, -32.f,
+                                        1.f, 1.5f, 1.f, 3.f, 13.3f, -11.f,
+                                        12.f, 12.9f, -312.f, 43.f, 413.3f, -114.f};
+  std::vector<float> rho = {0.5279583f};
+  std::vector<float> kernel_params = {0.001f, 0.f, 3.f};  //gamma, coef0, degree
+  std::vector<int64_t> classes = {0, 1};
+  std::vector<int64_t> vectors_per_class = {3, 3};
+
+  std::vector<float> X = {1.f, 0.0f, 0.4f,
+                          3.0f, 44.0f, -3.f,
+                          12.0f, 12.9f, -312.f,
+                          23.0f, 11.3f, -222.f,
+                          23.0f, 11.3f, -222.f};
+  std::vector<float> scores_predictions = {
+      0.95695829391479492f, -0.95695829391479492f,
+      0.1597825288772583f, -0.1597825288772583f,
+      0.797798752784729f, -0.797798752784729f,
+      -0.52760261297225952f, 0.52760261297225952f,
+      -0.52760261297225952f, 0.52760261297225952f};
+  std::vector<int64_t> class_predictions = {1, 1, 1, 0, 0};
+
+  test.AddAttribute("kernel_type", std::string("RBF"));
+  test.AddAttribute("coefficients", coefficients);
+  test.AddAttribute("support_vectors", support_vectors);
+  test.AddAttribute("vectors_per_class", vectors_per_class);
+  test.AddAttribute("rho", rho);
+  test.AddAttribute("kernel_params", kernel_params);
+  test.AddAttribute("classlabels_ints", classes);
+
+  test.AddInput<float>("X", {5, 3}, X);
+  test.AddOutput<int64_t>("Y", {5}, class_predictions);
+  test.AddOutput<float>("Z", {5, 2}, scores_predictions);
+
+  test.Run();
+}
+
 }  // namespace test
 }  // namespace onnxruntime
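The tests above pin down the Z layout that the reworked Compute produces when no probability parameters are set: for more than two classes, one column per ordered class pair (i, j) with i < j, pushed in the order of the nested i/j loops; for two classes, the single decision value is expanded to two columns by write_scores. A standalone sketch of the column count and ordering asserted by the four-class test (illustrative only):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int64_t k = 4;  // class_count_ in SVMClassifierMulticlassSVC
      const int64_t nc = k > 2 ? k * (k - 1) / 2 : 2;  // matches the nc computation in Compute
      std::printf("columns = %lld\n", (long long)nc);  // 6
      int64_t col = 0;
      for (int64_t i = 0; i < k; ++i)
        for (int64_t j = i + 1; j < k; ++j)
          std::printf("col %lld -> pair (%lld, %lld)\n",
                      (long long)col++, (long long)i, (long long)j);
    }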