From 49382a37d4e040c1d2baf87255bd77a000466446 Mon Sep 17 00:00:00 2001 From: xadupre Date: Sun, 22 Dec 2024 21:53:30 +0100 Subject: [PATCH] fix xgboost Signed-off-by: xadupre --- onnxmltools/utils/tests_helper.py | 13 ++++++++++--- tests/xgboost/test_xgboost_converters.py | 15 ++++++++++----- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/onnxmltools/utils/tests_helper.py b/onnxmltools/utils/tests_helper.py index 78d8eb81..c0084351 100644 --- a/onnxmltools/utils/tests_helper.py +++ b/onnxmltools/utils/tests_helper.py @@ -1,5 +1,6 @@ # SPDX-License-Identifier: Apache-2.0 +import unittest import pickle import os import numpy @@ -87,16 +88,22 @@ def dump_data_and_model( if not os.path.exists(folder): os.makedirs(folder) - if hasattr(model, "predict"): + if "LGBM" in model.__class__.__name__: try: import lightgbm except ImportError: - lightgbm = None + raise unittest.SkipTest("lightgbm cannot be imported.") + else: + lightgbm = None + if "XGB" in model.__class__.__name__ or "Booster" in model.__class__.__name__: try: import xgboost except ImportError: - xgboost = None + raise unittest.SkipTest("xgboost cannot be imported.") + else: + xgboost = None + if hasattr(model, "predict"): if lightgbm is not None and isinstance(model, lightgbm.Booster): # LightGBM Booster model_dict = model.dump_model() diff --git a/tests/xgboost/test_xgboost_converters.py b/tests/xgboost/test_xgboost_converters.py index c364257a..aa5c7477 100644 --- a/tests/xgboost/test_xgboost_converters.py +++ b/tests/xgboost/test_xgboost_converters.py @@ -386,8 +386,13 @@ def test_xgboost_classifier_i5450(self): iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10) - clr = XGBClassifier(objective="multi:softprob", max_depth=1, n_estimators=2) - clr.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=40) + clr = XGBClassifier( + objective="multi:softprob", + max_depth=1, + n_estimators=2, + 
+ early_stopping_rounds=40, + ) + clr.fit(X_train, y_train, eval_set=[(X_test, y_test)]) initial_type = [("float_input", FloatTensorType([None, 4]))] onx = convert_xgboost( clr, initial_types=initial_type, target_opset=TARGET_OPSET @@ -725,9 +730,10 @@ def test_xgb_classifier_13(self): colsample_bytree=0.75, random_state=42, verbosity=0, + early_stopping_rounds=40, ) - clr.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=40) + clr.fit(X_train, y_train, eval_set=[(X_test, y_test)]) initial_type = [("float_input", FloatTensorType([None, 797]))] onx = convert_xgboost( @@ -756,14 +762,13 @@ def test_xgb_classifier_13_2(self): "early_stopping_rounds": 113, "random_state": 42, "max_depth": 3, + "eval_metric": ["logloss", "auc", "error"], } - eval_metric = ["logloss", "auc", "error"] model = XGBClassifier(**model_param) model.fit( X=x_train, y=y_train, eval_set=[(x_test, y_test)], - eval_metric=eval_metric, verbose=False, )