diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e58400d0f..4fc39b23e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -8,11 +8,6 @@ repos:
     files: \.(md|yml)$
   - id: remove-tabs
     files: \.(md|yml)$
-- repo: https://github.com/PaddlePaddle/mirrors-yapf.git
-  rev: v0.16.2
-  hooks:
-  - id: yapf
-    files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$
 - repo: https://github.com/pre-commit/pre-commit-hooks
   rev: v4.4.0
   hooks:
diff --git a/ce_tests/dygraph/quant/src/eval.py b/ce_tests/dygraph/quant/src/eval.py
index b6afdc1dd..e8880ba14 100644
--- a/ce_tests/dygraph/quant/src/eval.py
+++ b/ce_tests/dygraph/quant/src/eval.py
@@ -17,9 +17,13 @@ def eval(args):
-    model_file = os.path.join(args.model_path, args.model_filename)
-    params_file = os.path.join(args.model_path, args.params_filename)
-    config = paddle_infer.Config(model_file, params_file)
+    if '2' in paddle.__version__.split('.')[0]:
+        config = paddle.inference.Config(
+            os.path.join(args.model_path, args.model_filename),
+            os.path.join(args.model_path, args.params_filename))
+    else:
+        model_prefix = args.model_filename.split('.')[0]
+        config = paddle.inference.Config(os.path.join(args.model_path, model_prefix))
     config.enable_mkldnn()
     config.switch_ir_optim(False)
diff --git a/ce_tests/dygraph/quant/src/test.py b/ce_tests/dygraph/quant/src/test.py
index 1dbd85d75..6ad395ed4 100644
--- a/ce_tests/dygraph/quant/src/test.py
+++ b/ce_tests/dygraph/quant/src/test.py
@@ -18,9 +18,13 @@ def eval(args):
     # create predictor
-    model_file = os.path.join(args.model_path, args.model_filename)
-    params_file = os.path.join(args.model_path, args.params_filename)
-    config = paddle_infer.Config(model_file, params_file)
+    if '2' in paddle.__version__.split('.')[0]:
+        config = paddle.inference.Config(
+            os.path.join(args.model_path, args.model_filename),
+            os.path.join(args.model_path, args.params_filename))
+    else:
+        model_prefix = args.model_filename.split('.')[0]
+        config = paddle.inference.Config(os.path.join(args.model_path, model_prefix))
     if args.use_gpu:
         config.enable_use_gpu(1000, 0)
     if not args.ir_optim:
diff --git a/demo/dygraph/post_quant/eval.py b/demo/dygraph/post_quant/eval.py
index f9af9e39f..e9dd8341a 100644
--- a/demo/dygraph/post_quant/eval.py
+++ b/demo/dygraph/post_quant/eval.py
@@ -30,9 +30,14 @@ def eval():
     # create predictor
-    model_file = os.path.join(FLAGS.model_path, FLAGS.model_filename)
-    params_file = os.path.join(FLAGS.model_path, FLAGS.params_filename)
-    config = paddle_infer.Config(model_file, params_file)
+    if '2' in paddle.__version__.split('.')[0]:
+        config = paddle.inference.Config(
+            os.path.join(FLAGS.model_path, FLAGS.model_filename),
+            os.path.join(FLAGS.model_path, FLAGS.params_filename))
+    else:
+        model_prefix = FLAGS.model_filename.split('.')[0]
+        config = paddle.inference.Config(os.path.join(FLAGS.model_path, model_prefix))
+
     if FLAGS.use_gpu:
         config.enable_use_gpu(1000, 0)
     if not FLAGS.ir_optim:
diff --git a/example/auto_compression/detection/paddle_inference_eval.py b/example/auto_compression/detection/paddle_inference_eval.py
index 838d37e13..6d7cd26f7 100644
--- a/example/auto_compression/detection/paddle_inference_eval.py
+++ b/example/auto_compression/detection/paddle_inference_eval.py
@@ -243,9 +243,13 @@ def load_predictor(
         raise ValueError(
             "Predict by TensorRT mode: {}, expect device=='GPU', but device == {}".
             format(precision, device))
-    config = Config(
+    # support paddle 2.x
+    if '2' in paddle.__version__.split('.')[0]:
+        config = Config(
         os.path.join(model_dir, "model.pdmodel"),
         os.path.join(model_dir, "model.pdiparams"))
+    else:
+        config = Config(os.path.join(model_dir, "model"))
 
     config.enable_memory_optim()
     if device == "GPU":
diff --git a/example/auto_compression/image_classification/paddle_inference_eval.py b/example/auto_compression/image_classification/paddle_inference_eval.py
index d36073875..4e6214795 100644
--- a/example/auto_compression/image_classification/paddle_inference_eval.py
+++ b/example/auto_compression/image_classification/paddle_inference_eval.py
@@ -113,9 +113,14 @@ def __init__(self):
     def _create_paddle_predictor(self):
         inference_model_dir = args.model_path
-        model_file = os.path.join(inference_model_dir, args.model_filename)
-        params_file = os.path.join(inference_model_dir, args.params_filename)
-        config = paddle.inference.Config(model_file, params_file)
+        if '2' in paddle.__version__.split('.')[0]:
+            config = paddle.inference.Config(
+                os.path.join(inference_model_dir, args.model_filename),
+                os.path.join(inference_model_dir, args.params_filename))
+        else:
+            model_prefix = args.model_filename.split('.')[0]
+            config = paddle.inference.Config(os.path.join(inference_model_dir, model_prefix))
+
         precision = paddle.inference.Config.Precision.Float32
         if args.use_int8:
             precision = paddle.inference.Config.Precision.Int8
diff --git a/example/auto_compression/nlp/paddle_inference_eval.py b/example/auto_compression/nlp/paddle_inference_eval.py
index 073f032e5..0a6584e4e 100644
--- a/example/auto_compression/nlp/paddle_inference_eval.py
+++ b/example/auto_compression/nlp/paddle_inference_eval.py
@@ -207,9 +207,13 @@ def create_predictor(cls, args):
         create_predictor func
         """
         cls.rerun_flag = False
-        config = paddle.inference.Config(
-            os.path.join(args.model_path, args.model_filename),
-            os.path.join(args.model_path, args.params_filename))
+        if '2' in paddle.__version__.split('.')[0]:
+            config = paddle.inference.Config(
+                os.path.join(args.model_path, args.model_filename),
+                os.path.join(args.model_path, args.params_filename))
+        else:
+            model_prefix = args.model_filename.split('.')[0]
+            config = paddle.inference.Config(os.path.join(args.model_path, model_prefix))
         config.switch_ir_debug(True)
         # 适用于ERNIE 3.0-Medium模型
         # config.exp_disable_tensorrt_ops(["elementwise_add"])
diff --git a/example/auto_compression/nlp/paddle_inference_eval_uie.py b/example/auto_compression/nlp/paddle_inference_eval_uie.py
index 2f378ef02..2a3319170 100644
--- a/example/auto_compression/nlp/paddle_inference_eval_uie.py
+++ b/example/auto_compression/nlp/paddle_inference_eval_uie.py
@@ -162,10 +162,14 @@ def create_predictor(cls, args):
         """
         create_predictor func
         """
-        cls.rerun_flag = False
-        config = paddle.inference.Config(
-            os.path.join(args.model_path, args.model_filename),
-            os.path.join(args.model_path, args.params_filename))
+        cls.rerun_flag = False
+        if '2' in paddle.__version__.split('.')[0]:
+            config = paddle.inference.Config(
+                os.path.join(args.model_path, args.model_filename),
+                os.path.join(args.model_path, args.params_filename))
+        else:
+            model_prefix = args.model_filename.split('.')[0]
+            config = paddle.inference.Config(os.path.join(args.model_path, model_prefix))
         if args.device == "gpu":
             # set GPU configs accordingly
             config.enable_use_gpu(100, 0)
diff --git a/example/auto_compression/pytorch_huggingface/paddle_inference_eval.py b/example/auto_compression/pytorch_huggingface/paddle_inference_eval.py
index 338825a66..6ce2b1453 100644
--- a/example/auto_compression/pytorch_huggingface/paddle_inference_eval.py
+++ b/example/auto_compression/pytorch_huggingface/paddle_inference_eval.py
@@ -188,9 +188,14 @@ def create_predictor(cls, args):
         create_predictor func
         """
         cls.rerun_flag = False
-        config = paddle.inference.Config(
-            os.path.join(args.model_path, args.model_filename),
-            os.path.join(args.model_path, args.params_filename))
+
+        if '2' in paddle.__version__.split('.')[0]:
+            config = paddle.inference.Config(
+                os.path.join(args.model_path, args.model_filename),
+                os.path.join(args.model_path, args.params_filename))
+        else:
+            model_prefix = args.model_filename.split('.')[0]
+            config = paddle.inference.Config(os.path.join(args.model_path, model_prefix))
 
         if args.device == "gpu":
             # set GPU configs accordingly
diff --git a/example/auto_compression/pytorch_yolo_series/paddle_inference_eval.py b/example/auto_compression/pytorch_yolo_series/paddle_inference_eval.py
index a1df31b78..77c04d76a 100644
--- a/example/auto_compression/pytorch_yolo_series/paddle_inference_eval.py
+++ b/example/auto_compression/pytorch_yolo_series/paddle_inference_eval.py
@@ -253,9 +253,13 @@ def load_predictor(
         raise ValueError(
             "Predict by TensorRT mode: {}, expect device=='GPU', but device == {}".
             format(precision, device))
-    config = Config(
+    if '2' in paddle.__version__.split('.')[0]:
+        config = Config(
         os.path.join(model_dir, "model.pdmodel"),
         os.path.join(model_dir, "model.pdiparams"))
+    else:
+        config = Config(os.path.join(model_dir, "model"))
+
     if device == "GPU":
         # initial GPU memory(M), device ID
         config.enable_use_gpu(200, 0)
diff --git a/example/auto_compression/pytorch_yolo_series/paddle_trt_infer.py b/example/auto_compression/pytorch_yolo_series/paddle_trt_infer.py
index 5b471690c..40c155e38 100644
--- a/example/auto_compression/pytorch_yolo_series/paddle_trt_infer.py
+++ b/example/auto_compression/pytorch_yolo_series/paddle_trt_infer.py
@@ -244,9 +244,12 @@ def load_predictor(model_dir,
         raise ValueError(
             "Predict by TensorRT mode: {}, expect device=='GPU', but device == {}"
             .format(run_mode, device))
-    config = Config(
-        os.path.join(model_dir, 'model.pdmodel'),
-        os.path.join(model_dir, 'model.pdiparams'))
+    if '2' in paddle.__version__.split('.')[0]:
+        config = Config(
+            os.path.join(model_dir, "model.pdmodel"),
+            os.path.join(model_dir, "model.pdiparams"))
+    else:
+        config = Config(os.path.join(model_dir, "model"))
     if device == 'GPU':
         # initial GPU memory(M), device ID
         config.enable_use_gpu(200, 0)
diff --git a/example/auto_compression/semantic_segmentation/paddle_inference_eval.py b/example/auto_compression/semantic_segmentation/paddle_inference_eval.py
index f9066389b..b71426e07 100644
--- a/example/auto_compression/semantic_segmentation/paddle_inference_eval.py
+++ b/example/auto_compression/semantic_segmentation/paddle_inference_eval.py
@@ -44,9 +44,13 @@ def load_predictor(args):
     load predictor func
     """
     rerun_flag = False
-    model_file = os.path.join(args.model_path, args.model_filename)
-    params_file = os.path.join(args.model_path, args.params_filename)
-    pred_cfg = PredictConfig(model_file, params_file)
+    if '2' in paddle.__version__.split('.')[0]:
+        pred_cfg = PredictConfig(
+            os.path.join(args.model_path, args.model_filename),
+            os.path.join(args.model_path, args.params_filename))
+    else:
+        model_prefix = args.model_filename.split(".")[0]
+        pred_cfg = PredictConfig(os.path.join(args.model_path, model_prefix))
     pred_cfg.enable_memory_optim()
     pred_cfg.switch_ir_optim(True)
     if args.device == "GPU":
diff --git a/example/quantization/ptq/classification/eval.py b/example/quantization/ptq/classification/eval.py
index ef7dc749e..80c202194 100644
--- a/example/quantization/ptq/classification/eval.py
+++ b/example/quantization/ptq/classification/eval.py
@@ -30,9 +30,13 @@ def eval():
     # create predictor
-    model_file = os.path.join(FLAGS.model_path, FLAGS.model_filename)
-    params_file = os.path.join(FLAGS.model_path, FLAGS.params_filename)
-    config = paddle_infer.Config(model_file, params_file)
+    if '2' in paddle.__version__.split('.')[0]:
+        config = paddle.inference.Config(
+            os.path.join(FLAGS.model_path, FLAGS.model_filename),
+            os.path.join(FLAGS.model_path, FLAGS.params_filename))
+    else:
+        model_prefix = FLAGS.model_filename.split('.')[0]
+        config = paddle.inference.Config(os.path.join(FLAGS.model_path, model_prefix))
    if FLAGS.use_gpu:
        config.enable_use_gpu(1000, 0)
    if not FLAGS.ir_optim:
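
Note: every Python hunk above repeats the same dispatch on paddle.__version__: on Paddle 2.x the inference Config is built from explicit model and params file paths, otherwise it is built from the model file prefix alone. A minimal, self-contained sketch of that pattern is below; the helper name make_inference_config is hypothetical and not part of this PR.

# Sketch only: consolidates the version-dispatch pattern used in the hunks above.
# Assumes the Paddle 2.x layout of <model_filename> + <params_filename>, and a
# prefix-only Config(...) call for other Paddle versions, as in this PR.
import os

import paddle


def make_inference_config(model_path, model_filename, params_filename):
    if '2' in paddle.__version__.split('.')[0]:
        # Paddle 2.x: pass the model file and params file paths explicitly.
        return paddle.inference.Config(
            os.path.join(model_path, model_filename),
            os.path.join(model_path, params_filename))
    # Other versions: pass only the model prefix (filename without extension).
    model_prefix = model_filename.split('.')[0]
    return paddle.inference.Config(os.path.join(model_path, model_prefix))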