From 668b902884e11f5cc909d4bc0dbed1856555ab25 Mon Sep 17 00:00:00 2001 From: SigureMo Date: Thu, 8 Jun 2023 10:40:56 +0800 Subject: [PATCH 1/4] bump ruff to 0.0.271 and update config --- .flake8 | 22 +++------- .pre-commit-config.yaml | 2 +- .../api/yaml/generator/tensor_operants_gen.py | 14 +++--- pyproject.toml | 34 +++++++------- .../auto_parallel/static/helper.py | 2 +- .../paddle/distributed/fleet/base/topology.py | 15 +++---- .../distributed/fleet/base/util_factory.py | 2 +- .../fleet/data_generator/data_generator.py | 3 +- .../distributed/fleet/dataset/dataset.py | 17 +++---- .../paddle/distributed/fleet/launch_utils.py | 12 ++--- .../pp_utils/p2p_communication.py | 5 +-- .../sharding/group_sharded_stage2.py | 6 +-- .../sharding/group_sharded_storage.py | 2 +- python/paddle/distributed/metric/metrics.py | 10 ++--- python/paddle/distributed/parallel.py | 3 +- .../passes/auto_parallel_recompute.py | 2 +- .../distributed/passes/fuse_all_reduce.py | 2 +- python/paddle/distributed/passes/pass_base.py | 14 +++--- python/paddle/distributed/spawn.py | 14 +++--- .../paddle/distributed/utils/launch_utils.py | 5 ++- python/paddle/hapi/model.py | 2 +- .../incubate/distributed/fleet/fleet_util.py | 7 ++- .../fleet/parameter_server/pslib/node.py | 9 ++-- .../pslib/optimizer_factory.py | 44 ++++++++++--------- .../incubate/distributed/fleet/utils.py | 2 +- python/paddle/incubate/nn/attn_bias.py | 2 +- python/paddle/io/dataloader/dataset.py | 2 +- .../paddle/jit/dy2static/convert_operators.py | 2 +- .../paddle/jit/dy2static/partial_program.py | 6 +-- python/paddle/nn/functional/conv.py | 2 +- python/paddle/nn/functional/pooling.py | 4 +- python/paddle/nn/layer/pooling.py | 6 +-- python/paddle/optimizer/lr.py | 6 +-- python/paddle/profiler/profiler_statistic.py | 6 +-- python/paddle/static/amp/decorator.py | 2 +- python/paddle/static/io.py | 2 +- python/paddle/static/nn/common.py | 7 +-- python/paddle/tensor/creation.py | 8 ++-- python/paddle/tensor/logic.py | 3 +- python/paddle/tensor/manipulation.py | 3 +- python/paddle/tensor/math.py | 13 +++--- .../utils/cpp_extension/cpp_extension.py | 2 +- .../utils/cpp_extension/extension_utils.py | 2 +- python/paddle/utils/layers_utils.py | 5 ++- setup.py | 3 +- test/distribution/parameterize.py | 5 +-- test/dygraph_to_static/test_declarative.py | 10 ++--- test/dygraph_to_static/test_tensor_shape.py | 2 +- test/legacy_test/eager_op_test.py | 5 +-- test/legacy_test/gradient_checker.py | 11 ++--- test/legacy_test/prim_op_test.py | 39 +++++++--------- test/legacy_test/test_bilateral_slice_op.py | 1 - test/legacy_test/test_callback_early_stop.py | 2 +- test/legacy_test/test_dist_base.py | 1 - test/legacy_test/test_download.py | 12 ++--- .../test_feed_data_check_shape_type.py | 9 ++-- test/legacy_test/test_full_like_op.py | 3 -- test/legacy_test/test_gradient_clip.py | 12 ++--- test/legacy_test/test_require_version.py | 3 +- test/legacy_test/test_sparse_softmax_op.py | 3 +- test/mkldnn/test_prelu_mkldnn_op.py | 1 - test/quantization/test_imperative_ptq.py | 6 +-- test/quantization/test_imperative_qat_amp.py | 6 +-- tools/CrossStackProfiler/CspFileReader.py | 8 ++-- tools/CrossStackProfiler/NetFileReader.py | 3 +- tools/analysis_build_time.py | 15 +++---- tools/check_op_benchmark_result.py | 8 ++-- tools/continuous_integration/bisect.py | 10 ++--- tools/get_pr_ut.py | 2 +- tools/get_single_test_cov.py | 6 +-- tools/get_ut_file_map.py | 3 +- tools/group_case_for_parallel.py | 3 +- tools/handle_h_cu_file.py | 6 +-- tools/prune_for_jetson.py | 5 +-- 
tools/sampcd_processor.py | 8 ++-- 75 files changed, 233 insertions(+), 311 deletions(-) diff --git a/.flake8 b/.flake8 index eeee9c2329a825..bbfdb55afbeba7 100644 --- a/.flake8 +++ b/.flake8 @@ -1,13 +1,14 @@ [flake8] -select = C,E,F,W +select = C,E,W exclude = ./build, - # A trick to exclude fluid/ but keep fluid/tests/, see more at - # https://github.com/PaddlePaddle/Paddle/pull/46290#discussion_r976392010 - ./python/paddle/fluid/[!t]**, - ./python/paddle/fluid/tra**, + # Exclude fluid directory + ./python/paddle/fluid/**, # Exclude third-party libraries ./python/paddle/utils/gast/**, + # Temporarily ignore CINN files, it will fix later + ./python/cinn/**, + ./test/cinn/**, ignore = # Whitespace before ‘,’, ‘;’, or ‘:’, it is not compatible with black E203, @@ -23,20 +24,9 @@ ignore = E731, # Do not use variables named ‘l’, ‘O’, or ‘I’ E741, - # `name` may be undefined, or defined from star imports: `module` - F405, - # Local variable name is assigned to but never used - F841, # Line break before binary operator, it is not compatible with black W503 per-file-ignores = # These files need tabs for testing. test/dygraph_to_static/test_error.py:E101,W191 python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py:E101,W191 - # Ignore unused imports in __init__.py - __init__.py: F401 - # Ignore undefined variables in CMake config and some dygraph_to_static tests - .cmake-format.py: F821 - test/dygraph_to_static/test_loop.py: F821 - test/dygraph_to_static/test_closure_analysis.py: F821 - python/paddle/static/amp/decorator.py: F811 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6b725b3849ed02..0d5d0f89ab58d9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -67,7 +67,7 @@ repos: - id: flake8 args: ["--config=.flake8"] - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.254 + rev: v0.0.271 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix, --no-cache] diff --git a/paddle/phi/api/yaml/generator/tensor_operants_gen.py b/paddle/phi/api/yaml/generator/tensor_operants_gen.py index 4cca569c2f0b7a..f86efeaaefac88 100644 --- a/paddle/phi/api/yaml/generator/tensor_operants_gen.py +++ b/paddle/phi/api/yaml/generator/tensor_operants_gen.py @@ -493,10 +493,9 @@ def get_declare_args_without_first_tensor(self, inplace_flag=False): ) first_input_type = " ".join(declare_args[0].split(" ")[:-1]) # NOTE(HongyuJia): Do not consider "const paddle::optional&" - assert first_input_type == "const Tensor&", ( - "Error! The first argument of Tensor Api %s must be Tensor, but received %s" - % (func_name, first_input_type) - ) + assert ( + first_input_type == "const Tensor&" + ), f"Error! The first argument of Tensor Api {func_name} must be Tensor, but received {first_input_type}" for name in self.attrs['names']: default_value = '' if self.attrs['attr_info'][name][1] is not None: @@ -515,10 +514,9 @@ def get_define_args_without_first_tensor(self, inplace_flag=False): ) first_input_type = " ".join(define_args[0].split(" ")[:-1]) # NOTE(HongyuJia): Do not consider "const paddle::optional&" - assert first_input_type == "const Tensor&", ( - "Error! The first argument of Tensor Api %s must be Tensor, but received %s" - % (func_name, first_input_type) - ) + assert ( + first_input_type == "const Tensor&" + ), f"Error! 
The first argument of Tensor Api {func_name} must be Tensor, but received {first_input_type}" for name in self.attrs['names']: define_args.append(self.attrs['attr_info'][name][0] + ' ' + name) # remove first Tensor argument diff --git a/pyproject.toml b/pyproject.toml index 6a5dc31cde60b7..486b7218f18b33 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,22 +11,26 @@ skip = ["build", "__init__.py"] extend_skip_glob = [ # These files do not need to be formatted, # see .flake8 for more details - "python/paddle/fluid/[!t]**", - "python/paddle/fluid/tra**", + "python/paddle/fluid/**", "python/paddle/utils/gast/**", + # Temporarily ignore CINN files, it will fix later + "python/cinn/**", + "test/cinn/**", ] [tool.ruff] exclude = [ "./build", - "./python/paddle/fluid/[!t]**", - "./python/paddle/fluid/tra**", + "./python/paddle/fluid/**", "./python/paddle/utils/gast/**", + # Temporarily ignore CINN files, it will fix later + "python/cinn/**", + "test/cinn/**", ] target-version = "py37" select = [ # Pyflakes - "F401", + "F", # Comprehensions "C4", @@ -60,17 +64,9 @@ select = [ "B032", # Pylint + "PLE", "PLC0414", "PLC3002", - "PLE0100", - "PLE0101", - "PLE0604", - "PLE0605", - "PLE1142", - "PLE1205", - "PLE1206", - "PLE1307", - "PLE2502", "PLR0206", "PLR0402", ] @@ -78,6 +74,10 @@ unfixable = [ "NPY001" ] ignore = [ + # `name` may be undefined, or defined from star imports: `module` + "F405", + # Local variable name is assigned to but never used + "F841", # It not met the "Explicit is better than implicit" rule "UP015", # It will cause the performance regression on python3.10 @@ -87,9 +87,13 @@ ignore = [ [tool.ruff.per-file-ignores] # Ignore unused imports in __init__.py "__init__.py" = ["F401"] +# Ignore undefined variables in CMake config and some dygraph_to_static tests +".cmake-format.py" = ["F821"] +"test/dygraph_to_static/test_closure_analysis.py" = ["F821"] +"python/paddle/static/amp/decorator.py" = ["F821"] # Ignore version check in setup.py "setup.py" = ["UP036"] # Ignore unnecessary comprehension in dy2st unittest test_loop -"test/dygraph_to_static/test_loop.py" = ["C416"] +"test/dygraph_to_static/test_loop.py" = ["C416", "F821"] # Ignore unnecessary lambda in dy2st unittest test_lambda "test/dygraph_to_static/test_lambda.py" = ["PLC3002"] diff --git a/python/paddle/distributed/auto_parallel/static/helper.py b/python/paddle/distributed/auto_parallel/static/helper.py index e901c861131cc0..f705ee49688484 100644 --- a/python/paddle/distributed/auto_parallel/static/helper.py +++ b/python/paddle/distributed/auto_parallel/static/helper.py @@ -242,7 +242,7 @@ def build_program(self, mode): # NOTE(dev): Because @to_static is a Lazy mechanism, so we explicitly call this to trigger # generating Program IR immediately. 
- getattr(self.proxy_layer, func_name).concrete_program + getattr(self.proxy_layer, func_name).concrete_program # noqa: B018 self._build_startup_program() diff --git a/python/paddle/distributed/fleet/base/topology.py b/python/paddle/distributed/fleet/base/topology.py index 4c7b780a5710ac..860fabd15cb255 100644 --- a/python/paddle/distributed/fleet/base/topology.py +++ b/python/paddle/distributed/fleet/base/topology.py @@ -204,15 +204,12 @@ def __init__(self, topology): self._dp_degree, ) ) - debug_str += ( - ", mp_group: %s, sharding_group: %s, pp_group: %s, dp_group: %s, check/clip group: %s" - % ( - self._mp_group, - self._sharding_group, - self._pp_group, - self._dp_group, - self._check_group, - ) + debug_str += ", mp_group: {}, sharding_group: {}, pp_group: {}, dp_group: {}, check/clip group: {}".format( + self._mp_group, + self._sharding_group, + self._pp_group, + self._dp_group, + self._check_group, ) logger.info(debug_str) diff --git a/python/paddle/distributed/fleet/base/util_factory.py b/python/paddle/distributed/fleet/base/util_factory.py index b203ede4f3d386..2c241d2f920b42 100755 --- a/python/paddle/distributed/fleet/base/util_factory.py +++ b/python/paddle/distributed/fleet/base/util_factory.py @@ -583,7 +583,7 @@ def check_not_expected_ops(prog, not_expected_op_types): global_block._remove_op(index) # if fetch_list have lod tensor - return_numpy = all([v.lod_level == 0 for v in fetch_list]) + return_numpy = all(v.lod_level == 0 for v in fetch_list) # try dump fetch_targets feed_tensors = [] diff --git a/python/paddle/distributed/fleet/data_generator/data_generator.py b/python/paddle/distributed/fleet/data_generator/data_generator.py index a376151e9b63bb..6fa6652e2c6bab 100644 --- a/python/paddle/distributed/fleet/data_generator/data_generator.py +++ b/python/paddle/distributed/fleet/data_generator/data_generator.py @@ -366,8 +366,7 @@ def _gen_str(self, line): ) if name != self._proto_info[index][0]: raise ValueError( - "the field name of two given line are not match: require<%s>, get<%s>." - % (self._proto_info[index][0], name) + f"the field name of two given line are not match: require<{self._proto_info[index][0]}>, get<{name}>." ) if output: output += " " diff --git a/python/paddle/distributed/fleet/dataset/dataset.py b/python/paddle/distributed/fleet/dataset/dataset.py index 0370e74338cd10..a28d439f7ecd52 100755 --- a/python/paddle/distributed/fleet/dataset/dataset.py +++ b/python/paddle/distributed/fleet/dataset/dataset.py @@ -307,8 +307,7 @@ def _check_use_var_with_data_generator( data_gen_len = len(user_parsed_line) if var_len != data_gen_len: raise ValueError( - "var length mismatch error: var_list = %s vs data_generator = %s" - % (var_len, data_gen_len) + f"var length mismatch error: var_list = {var_len} vs data_generator = {data_gen_len}" ) for i, ele in enumerate(user_parsed_line): @@ -324,10 +323,11 @@ def _check_use_var_with_data_generator( isinstance(ele, float) for ele in ele[1] ): raise TypeError( - "var dtype mismatch error: var name = %s, var type in var_list = %s, while var in data_generator contains non-float value, which is %s \n" + "var dtype mismatch error: var name = {}, var type in var_list = {}, while var in data_generator contains non-float value, which is {} \n" "Please check if order of var_list and data_generator are aligned. \n" - "Please check if var's type in data_generator is correct." 
- % (ele[0], "float", ele[1]) + "Please check if var's type in data_generator is correct.".format( + ele[0], "float", ele[1] + ) ) if ( @@ -335,10 +335,11 @@ def _check_use_var_with_data_generator( or var_list[i].dtype == core.VarDesc.VarType.INT32 ) and not all(isinstance(ele, int) for ele in ele[1]): raise TypeError( - "var dtype mismatch error: var name = %s, var type in var_list = %s, while var in data_generator contains non-int value, which is %s \n" + "var dtype mismatch error: var name = {}, var type in var_list = {}, while var in data_generator contains non-int value, which is {} \n" "Please check if order of var_list and data_generator are aligned. \n" - "Please check if var's type in data_generator is correct." - % (ele[0], "int", ele[1]) + "Please check if var's type in data_generator is correct.".format( + ele[0], "int", ele[1] + ) ) else: diff --git a/python/paddle/distributed/fleet/launch_utils.py b/python/paddle/distributed/fleet/launch_utils.py index a1a6cdb9c636f4..4334f4971e8b91 100755 --- a/python/paddle/distributed/fleet/launch_utils.py +++ b/python/paddle/distributed/fleet/launch_utils.py @@ -695,8 +695,9 @@ def get_gpus(gpus): for x in gpus.split(','): assert x in cuda_visible_devices_list, ( "Can't find " - "your gpus %s in CUDA_VISIBLE_DEVICES[%s]." - % (x, cuda_visible_devices) + "your gpus {} in CUDA_VISIBLE_DEVICES[{}].".format( + x, cuda_visible_devices + ) ) res_gpus = [ cuda_visible_devices_list.index(x.strip()) @@ -1485,10 +1486,9 @@ def get_role_endpoints(self, args): else: self.current_node_ip = pod_ip if not self.distribute_mode == DistributeMode.PS_HETER: - assert self.current_node_ip in self.node_ips, ( - "Can't find your local ip {%s} in args.servers and args.workers ips: {%s}" - % (self.current_node_ip, self.node_ips) - ) + assert ( + self.current_node_ip in self.node_ips + ), f"Can't find your local ip {{{self.current_node_ip}}} in args.servers and args.workers ips: {{{self.node_ips}}}" if self.current_node_ip in self.node_ips: self.node_rank = self.node_ips.index(self.current_node_ip) logger.debug( diff --git a/python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py b/python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py index 4983de3f2ab552..f686076b77f45e 100644 --- a/python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py +++ b/python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py @@ -69,9 +69,8 @@ def initialize_p2p_groups( ) = _hcg.get_p2p_groups() debug_str = ( - "P2pInfo: send_next_group: %s, send_prev_group: %s, " - "recv_next_group: %s, recv_prev_group: %s" - % ( + "P2pInfo: send_next_group: {}, send_prev_group: {}, " + "recv_next_group: {}, recv_prev_group: {}".format( repr(send_next_group), repr(send_prev_group), repr(recv_next_group), diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage2.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage2.py index 13ec8da6f112c3..2f3ddbed277d88 100644 --- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage2.py +++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage2.py @@ -79,10 +79,8 @@ def __init__( else sharding_optimizer ) assert all( - [ - isinstance(opt, GroupShardedOptimizerStage2) - for opt in self._sharding_optimizers - ] + isinstance(opt, GroupShardedOptimizerStage2) + for opt in self._sharding_optimizers ), "Please use GroupShardedOptimizerStage2 optimizer" self._sync_buffers = sync_buffers 
self._auto_refresh_trainable = auto_refresh_trainable diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_storage.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_storage.py index fb86a27072cce0..84dac4ae426376 100644 --- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_storage.py +++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_storage.py @@ -124,7 +124,7 @@ def add_rank_params(self, trainable_params, param2align, convert_gpu=True): """ assert all( - [id(param) not in self._param_ids for param in trainable_params] + id(param) not in self._param_ids for param in trainable_params ), "The same param cannot be checked in twice" assert self.buffer is not None diff --git a/python/paddle/distributed/metric/metrics.py b/python/paddle/distributed/metric/metrics.py index 5aea93bc8b54dd..1301d764643a1b 100644 --- a/python/paddle/distributed/metric/metrics.py +++ b/python/paddle/distributed/metric/metrics.py @@ -154,16 +154,12 @@ def print_metric(metric_ptr, name): """ if name.find("wuauc") != -1: metric = metric_ptr.get_wuauc_metric_msg(name) - monitor_msg = ( - "%s: User Count=%.0f INS Count=%.0f UAUC=%.6f WUAUC=%.6f " - % (name, metric[0], metric[1], metric[4], metric[5]) - ) + monitor_msg = f"{name}: User Count={metric[0]:.0f} INS Count={metric[1]:.0f} UAUC={metric[4]:.6f} WUAUC={metric[5]:.6f} " else: metric = metric_ptr.get_metric_msg(name) monitor_msg = ( - "%s: AUC=%.6f BUCKET_ERROR=%.6f MAE=%.6f RMSE=%.6f " - "Actual CTR=%.6f Predicted CTR=%.6f COPC=%.6f INS Count=%.0f" - % ( + "{}: AUC={:.6f} BUCKET_ERROR={:.6f} MAE={:.6f} RMSE={:.6f} " + "Actual CTR={:.6f} Predicted CTR={:.6f} COPC={:.6f} INS Count={:.0f}".format( name, metric[0], metric[1], diff --git a/python/paddle/distributed/parallel.py b/python/paddle/distributed/parallel.py index f8888b9f71c33d..abd4f3983aebd5 100644 --- a/python/paddle/distributed/parallel.py +++ b/python/paddle/distributed/parallel.py @@ -425,8 +425,7 @@ def init_reducer(self): params_set.add(param) if not isinstance(param, self.var_dtype): raise TypeError( - "The data type of '%s' must be '%s'" - % (param.name, self.var_dtype) + f"The data type of '{param.name}' must be '{self.var_dtype}'" ) if param.trainable: layers_param.append((sublayer, param)) diff --git a/python/paddle/distributed/passes/auto_parallel_recompute.py b/python/paddle/distributed/passes/auto_parallel_recompute.py index d64e8df305f75a..d69bdb2a44c10a 100644 --- a/python/paddle/distributed/passes/auto_parallel_recompute.py +++ b/python/paddle/distributed/passes/auto_parallel_recompute.py @@ -55,7 +55,7 @@ def reserved_vars(self): return self._reserved_vars def is_recompute(self): - return any([is_recompute_op(op) for op in self.ops]) + return any(is_recompute_op(op) for op in self.ops) def build_states(self): for i, op in enumerate(self.ops): diff --git a/python/paddle/distributed/passes/fuse_all_reduce.py b/python/paddle/distributed/passes/fuse_all_reduce.py index 8389599df8fa2d..a2a08487c19fd6 100755 --- a/python/paddle/distributed/passes/fuse_all_reduce.py +++ b/python/paddle/distributed/passes/fuse_all_reduce.py @@ -177,7 +177,7 @@ def is_valid_allreduce_op(op): if in_var.type != core.VarDesc.VarType.LOD_TENSOR: return False shape = in_var.shape - if any([s <= 0 for s in shape]): + if any(s <= 0 for s in shape): return False return True diff --git a/python/paddle/distributed/passes/pass_base.py b/python/paddle/distributed/passes/pass_base.py index 282bbe41c20545..a781d961e493f9 100755 --- 
a/python/paddle/distributed/passes/pass_base.py +++ b/python/paddle/distributed/passes/pass_base.py @@ -85,7 +85,7 @@ def _type(self): def _check_conflict_including_common_rules(self, other_pass): return self._check_conflict(other_pass) and all( - [r(other_pass, self) for r in PassBase._COMMON_RULES] + r(other_pass, self) for r in PassBase._COMMON_RULES ) def apply(self, main_programs, startup_programs, context=None): @@ -96,10 +96,8 @@ def apply(self, main_programs, startup_programs, context=None): return context if not all( - [ - self._check_conflict_including_common_rules(p) - for p in context.passes - ] + self._check_conflict_including_common_rules(p) + for p in context.passes ): return context @@ -325,10 +323,8 @@ def _solve_pass_conflict(passes, context): passes = [] for p in old_passes: if all( - [ - p._check_conflict_including_common_rules(applied_p) - for applied_p in context.passes - ] + p._check_conflict_including_common_rules(applied_p) + for applied_p in context.passes ): passes.append(p) diff --git a/python/paddle/distributed/spawn.py b/python/paddle/distributed/spawn.py index 62e6eb0b14228d..b378e27e4d3731 100644 --- a/python/paddle/distributed/spawn.py +++ b/python/paddle/distributed/spawn.py @@ -211,9 +211,10 @@ def _get_subprocess_env_list(nprocs, options): for card_id in selected_device_list: if card_id not in env_devices_list: raise ValueError( - "The selected gpu card %s cannot found in " - "CUDA_VISIBLE_DEVICES (%s)." - % (card_id, ",".join(env_devices_list)) + "The selected gpu card {} cannot found in " + "CUDA_VISIBLE_DEVICES ({}).".format( + card_id, ",".join(env_devices_list) + ) ) elif options['backend'] == 'bkcl': @@ -251,9 +252,10 @@ def _get_subprocess_env_list(nprocs, options): for card_id in selected_device_list: if card_id not in env_devices_list: raise ValueError( - "The selected xpu card %s cannot found in " - "XPU_VISIBLE_DEVICES (%s)." - % (card_id, ",".join(env_devices_list)) + "The selected xpu card {} cannot found in " + "XPU_VISIBLE_DEVICES ({}).".format( + card_id, ",".join(env_devices_list) + ) ) elif options['backend'] == 'gloo': # TODO check gpu / xpu flag must not exist diff --git a/python/paddle/distributed/utils/launch_utils.py b/python/paddle/distributed/utils/launch_utils.py index 634188ab8e51e6..b23beae8a63441 100644 --- a/python/paddle/distributed/utils/launch_utils.py +++ b/python/paddle/distributed/utils/launch_utils.py @@ -82,8 +82,9 @@ def get_gpus(selected_gpus): for x in selected_gpus.split(','): assert x in cuda_visible_devices_list, ( "Can't find " - "your selected_gpus %s in CUDA_VISIBLE_DEVICES[%s]." - % (x, cuda_visible_devices) + "your selected_gpus {} in CUDA_VISIBLE_DEVICES[{}].".format( + x, cuda_visible_devices + ) ) gpus = [ cuda_visible_devices_list.index(x.strip()) diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py index a20ee3169adc5c..654992e2d3f606 100644 --- a/python/paddle/hapi/model.py +++ b/python/paddle/hapi/model.py @@ -1894,7 +1894,7 @@ def fit( assert train_data is not None, "train_data must be given!" 
if isinstance(batch_size, (tuple, list)) and all( - [isinstance(x, int) for x in batch_size] + isinstance(x, int) for x in batch_size ): assert ( len(batch_size) == 2 diff --git a/python/paddle/incubate/distributed/fleet/fleet_util.py b/python/paddle/incubate/distributed/fleet/fleet_util.py index 9e115133089142..23777390cc8918 100644 --- a/python/paddle/incubate/distributed/fleet/fleet_util.py +++ b/python/paddle/incubate/distributed/fleet/fleet_util.py @@ -1640,10 +1640,9 @@ def print_global_metrics( total_ins_num_name, ) self.rank0_print( - "%s global AUC=%.6f BUCKET_ERROR=%.6f MAE=%.6f " - "RMSE=%.6f Actural_CTR=%.6f Predicted_CTR=%.6f " - "COPC=%.6f MEAN Q_VALUE=%.6f Ins number=%s" - % ( + "{} global AUC={:.6f} BUCKET_ERROR={:.6f} MAE={:.6f} " + "RMSE={:.6f} Actural_CTR={:.6f} Predicted_CTR={:.6f} " + "COPC={:.6f} MEAN Q_VALUE={:.6f} Ins number={}".format( print_prefix, auc, bucket_error, diff --git a/python/paddle/incubate/distributed/fleet/parameter_server/pslib/node.py b/python/paddle/incubate/distributed/fleet/parameter_server/pslib/node.py index 0a9ccc6e844e40..c1dc7db7b8fc4e 100644 --- a/python/paddle/incubate/distributed/fleet/parameter_server/pslib/node.py +++ b/python/paddle/incubate/distributed/fleet/parameter_server/pslib/node.py @@ -76,8 +76,7 @@ def add_sparse_table(self, table_id, strategy): return else: raise ValueError( - "expect table %s type=%s, but actual type=%s" - % (table_id, pslib.PS_SPARSE_TABLE, table.type) + f"expect table {table_id} type={pslib.PS_SPARSE_TABLE}, but actual type={table.type}" ) if strategy is None: strategy = {} @@ -388,8 +387,7 @@ def add_dense_table( return else: raise ValueError( - "expect table %s type=%s, but actual type=%s" - % (table_id, pslib.PS_DENSE_TABLE, table.type) + f"expect table {table_id} type={pslib.PS_DENSE_TABLE}, but actual type={table.type}" ) if strategy is None: @@ -480,8 +478,7 @@ def add_data_norm_table( return else: raise ValueError( - "expect table %s type=%s, but actual type=%s" - % (table_id, pslib.PS_DENSE_TABLE, table.type) + f"expect table {table_id} type={pslib.PS_DENSE_TABLE}, but actual type={table.type}" ) if strategy is None: strategy = {} diff --git a/python/paddle/incubate/distributed/fleet/parameter_server/pslib/optimizer_factory.py b/python/paddle/incubate/distributed/fleet/parameter_server/pslib/optimizer_factory.py index 99879df0c19b1d..285e9707440a50 100644 --- a/python/paddle/incubate/distributed/fleet/parameter_server/pslib/optimizer_factory.py +++ b/python/paddle/incubate/distributed/fleet/parameter_server/pslib/optimizer_factory.py @@ -277,8 +277,7 @@ def _has_conditional_block(self, loss): def _check_params_grads(self, params, grads): if len(params) != len(grads): raise ValueError( - "params size != grads size, %s vs %s" - % (len(params), len(grads)) + f"params size != grads size, {len(params)} vs {len(grads)}" ) pname2grad = {} @@ -353,8 +352,7 @@ def _gen_distributed_emb_to_size_dict(self, program): d_size[table_name] = emb_size elif d_size[table_name] != emb_size: raise ValueError( - "embedding size error: %s vs %s" - % (emb_size, d_size[table_name]) + f"embedding size error: {emb_size} vs {d_size[table_name]}" ) return d_size @@ -384,9 +382,10 @@ def _check_config_fleet_with_program_op( and st["sparse_embedx_dim"] != emb_to_size[table_name] - 3 ): raise ValueError( - "fleet config sparse_embedx_dim=%s not" - " equal to embedding dim - 3 = %s" - % (st["sparse_embedx_dim"], emb_to_size[table_name] - 3) + "fleet config sparse_embedx_dim={} not" + " equal to embedding dim - 3 = {}".format( + 
st["sparse_embedx_dim"], emb_to_size[table_name] - 3 + ) ) if ( st.get("sparse_embedx_dim") is not None @@ -394,9 +393,10 @@ def _check_config_fleet_with_program_op( and st["sparse_embedx_dim"] != emb_to_size[table_name] - 1 ): raise ValueError( - "fleet config sparse_embedx_dim=%s not" - " equal to embedding dim - 1 = %s" - % (st["sparse_embedx_dim"], emb_to_size[table_name] - 1) + "fleet config sparse_embedx_dim={} not" + " equal to embedding dim - 1 = {}".format( + st["sparse_embedx_dim"], emb_to_size[table_name] - 1 + ) ) if ( st.get("sparse_embedx_dim") is None @@ -432,9 +432,10 @@ def _check_config_fleet_with_program_op( and st["sparse_embedx_dim"] != emb_to_size[table_name] ): raise ValueError( - "fleet config sparse_embedx_dim=%s not" - " equal to embedding dim = %s" - % (st["sparse_embedx_dim"], emb_to_size[table_name]) + "fleet config sparse_embedx_dim={} not" + " equal to embedding dim = {}".format( + st["sparse_embedx_dim"], emb_to_size[table_name] + ) ) if st.get("sparse_embedx_dim") is None: logger.warning( @@ -603,8 +604,7 @@ def _minimize( else: if len(ps_param.trainer_param) != len(prog_id_to_worker): raise ValueError( - "trainer param size != program size, %s vs %s" - % (len(ps_param.trainer_param), len(prog_id_to_worker)) + f"trainer param size != program size, {len(ps_param.trainer_param)} vs {len(prog_id_to_worker)}" ) idx = 0 # prog_id_to_worker is OrderedDict @@ -682,9 +682,10 @@ def _minimize( and st["sparse_embedx_dim"] != emb_to_size[key] - 3 ): raise ValueError( - "fleet config sparse_embedx_dim=%s not" - " equal to embedding size - 3 = %s" - % (st["sparse_embedx_dim"], emb_to_size[key] - 3) + "fleet config sparse_embedx_dim={} not" + " equal to embedding size - 3 = {}".format( + st["sparse_embedx_dim"], emb_to_size[key] - 3 + ) ) st["sparse_embedx_dim"] = emb_to_size[key] - 3 elif accessor == "DownpourSparseValueAccessor": @@ -693,9 +694,10 @@ def _minimize( and st["sparse_embedx_dim"] != emb_to_size[key] ): raise ValueError( - "fleet config sparse_embedx_dim=%s not" - " equal to embedding size = %s" - % (st["sparse_embedx_dim"], emb_to_size[key]) + "fleet config sparse_embedx_dim={} not" + " equal to embedding size = {}".format( + st["sparse_embedx_dim"], emb_to_size[key] + ) ) st["sparse_embedx_dim"] = emb_to_size[key] diff --git a/python/paddle/incubate/distributed/fleet/utils.py b/python/paddle/incubate/distributed/fleet/utils.py index 4c98707b2ffa5b..45cd520d67e7f9 100644 --- a/python/paddle/incubate/distributed/fleet/utils.py +++ b/python/paddle/incubate/distributed/fleet/utils.py @@ -321,7 +321,7 @@ def try_load_model_vars( global_block._remove_op(index) # if fetch_list have lod tensor - return_numpy = all([v.lod_level == 0 for v in fetch_list]) + return_numpy = all(v.lod_level == 0 for v in fetch_list) # try dump fetch_targets feed_tensors = [] diff --git a/python/paddle/incubate/nn/attn_bias.py b/python/paddle/incubate/nn/attn_bias.py index fbbb016df4e2d1..9e97b636932a2c 100644 --- a/python/paddle/incubate/nn/attn_bias.py +++ b/python/paddle/incubate/nn/attn_bias.py @@ -108,7 +108,7 @@ def from_seqlens(cls, seqlens): @classmethod def from_seqlens_padded(cls, seqlens, padding): - assert all([seqlen <= padding for seqlen in seqlens]) + assert all(seqlen <= padding for seqlen in seqlens) seqstart_py = list(range(0, len(seqlens) * padding + 1, padding)) return cls( seqlen=paddle.to_tensor(seqlens, dtype=paddle.int32), diff --git a/python/paddle/io/dataloader/dataset.py b/python/paddle/io/dataloader/dataset.py index 4442531f7d1620..3e0458ae9b7009 100755 --- 
a/python/paddle/io/dataloader/dataset.py +++ b/python/paddle/io/dataloader/dataset.py @@ -273,7 +273,7 @@ def __init__(self, tensors): "TensorDataset con only be used in imperative mode" ) assert all( - [tensor.shape[0] == tensors[0].shape[0] for tensor in tensors] + tensor.shape[0] == tensors[0].shape[0] for tensor in tensors ), "tensors not have same shape of the 1st dimension" self.tensors = tensors diff --git a/python/paddle/jit/dy2static/convert_operators.py b/python/paddle/jit/dy2static/convert_operators.py index b5d5b3da4c3c81..39029708320a31 100644 --- a/python/paddle/jit/dy2static/convert_operators.py +++ b/python/paddle/jit/dy2static/convert_operators.py @@ -599,7 +599,7 @@ def convert_shape(x): """ def has_negative(list_shape): - return any([x < 0 for x in list_shape]) + return any(x < 0 for x in list_shape) # When `x` is Variable: # (1) if x.shape contains -1, such as [2, -1, 64], returns [2, var, 64], diff --git a/python/paddle/jit/dy2static/partial_program.py b/python/paddle/jit/dy2static/partial_program.py index 042977988d4b35..9032c9c23ae7a3 100644 --- a/python/paddle/jit/dy2static/partial_program.py +++ b/python/paddle/jit/dy2static/partial_program.py @@ -583,10 +583,8 @@ def _insert_aggregation_ops_for_var(target_program, var): filter( lambda x: x[0] >= start_idx and any( - [ - out_arg == var_grad_name - for out_arg in x[1].output_arg_names - ] + out_arg == var_grad_name + for out_arg in x[1].output_arg_names ), enumerate(target_program.block(0).ops), ) diff --git a/python/paddle/nn/functional/conv.py b/python/paddle/nn/functional/conv.py index 671ed1765cf222..3a2b7ffe15bd6c 100644 --- a/python/paddle/nn/functional/conv.py +++ b/python/paddle/nn/functional/conv.py @@ -102,7 +102,7 @@ def _update_padding_nd(padding, channel_last, num_dims): else: padding_algorithm = "EXPLICIT" padding = convert_to_list(padding, num_dims, 'padding') - if not all([p >= 0 for p in padding]): + if not all(p >= 0 for p in padding): raise ValueError( "Invalid padding, all value should be larger than or equal to 0, but received: {}".format( padding diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py index aff98a97b11f0b..f68c892123b8a5 100755 --- a/python/paddle/nn/functional/pooling.py +++ b/python/paddle/nn/functional/pooling.py @@ -749,7 +749,7 @@ def max_unpool1d( This API implements max unpooling 1d opereation. `max_unpool1d` accepts the output of `max_pool1d` as input, including the indices of the maximum value and calculate the partial inverse. - All non-maximum values ​​are set to zero. + All non-maximum values are set to zero. - Input: :math:`(N, C, L_{in})` - Output: :math:`(N, C, L_{out})`, where @@ -1025,7 +1025,7 @@ def max_unpool3d( This API implements max unpooling 3d opereation. `max_unpool3d` accepts the output of `max_pool3d` as input, including the indices of the maximum value and calculate the partial inverse. - All non-maximum values ​​are set to zero. + All non-maximum values are set to zero. - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where diff --git a/python/paddle/nn/layer/pooling.py b/python/paddle/nn/layer/pooling.py index fbaabafcb5bb0b..1a3e53095b58ff 100755 --- a/python/paddle/nn/layer/pooling.py +++ b/python/paddle/nn/layer/pooling.py @@ -1123,7 +1123,7 @@ class MaxUnPool1D(Layer): `max_unpool1d` accepts the output of `max_pool1d` as input, including the indices of the maximum value and calculate the partial inverse. - All non-maximum values ​​are set to zero. 
+ All non-maximum values are set to zero. - Input: :math:`(N, C, L_{in})` - Output: :math:`(N, C, L_{out})`, where @@ -1207,7 +1207,7 @@ class MaxUnPool2D(Layer): 'max_unpool2d' accepts the output of 'max_unpool2d' as input Including the indices of the maximum value and calculating the partial inverse - All non-maximum values ​​are set to zero. + All non-maximum values are set to zero. Parameters: @@ -1295,7 +1295,7 @@ class MaxUnPool3D(Layer): `max_unpool3d` accepts the output of `max_pool3d` as input, including the indices of the maximum value and calculate the partial inverse. - All non-maximum values ​​are set to zero. + All non-maximum values are set to zero. - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where diff --git a/python/paddle/optimizer/lr.py b/python/paddle/optimizer/lr.py index d28abf5d631c9b..ebdab992784892 100644 --- a/python/paddle/optimizer/lr.py +++ b/python/paddle/optimizer/lr.py @@ -1014,10 +1014,8 @@ def __init__( ) if not all( - [ - milestones[i] < milestones[i + 1] - for i in range(len(milestones) - 1) - ] + milestones[i] < milestones[i + 1] + for i in range(len(milestones) - 1) ): raise ValueError('The elements of milestones must be incremented') if gamma >= 1.0: diff --git a/python/paddle/profiler/profiler_statistic.py b/python/paddle/profiler/profiler_statistic.py index df1cfe49cd9d2b..6f21ca8eee357b 100755 --- a/python/paddle/profiler/profiler_statistic.py +++ b/python/paddle/profiler/profiler_statistic.py @@ -452,10 +452,8 @@ def parse(self, nodetrees): # case 2: TracerEventType is Operator but is communication op elif hostnode.type == TracerEventType.Operator and any( - [ - name in hostnode.name.lower() - for name in _CommunicationOpName - ] + name in hostnode.name.lower() + for name in _CommunicationOpName ): self.cpu_communication_range.append( (hostnode.start_ns, hostnode.end_ns) diff --git a/python/paddle/static/amp/decorator.py b/python/paddle/static/amp/decorator.py index feadae70022be4..a9d105b4db7a32 100644 --- a/python/paddle/static/amp/decorator.py +++ b/python/paddle/static/amp/decorator.py @@ -804,7 +804,7 @@ def run_example_code(): @overload(key=FunctionType.COMMON) -def decorate( +def decorate( # noqa: F811 optimizer, amp_lists=None, level='O1', diff --git a/python/paddle/static/io.py b/python/paddle/static/io.py index 529a3fc945b8bb..5ca397d5068597 100644 --- a/python/paddle/static/io.py +++ b/python/paddle/static/io.py @@ -75,7 +75,7 @@ def _check_args(caller, args, supported_args=None, deprecated_args=None): def _check_vars(name, var_list): if not isinstance(var_list, list): var_list = [var_list] - if not all([isinstance(var, Variable) for var in var_list]): + if not all(isinstance(var, Variable) for var in var_list): raise ValueError( f"'{name}' should be a Variable or a list of Variable." ) diff --git a/python/paddle/static/nn/common.py b/python/paddle/static/nn/common.py index 6aeef4934e5237..7c410efbc05403 100644 --- a/python/paddle/static/nn/common.py +++ b/python/paddle/static/nn/common.py @@ -473,7 +473,7 @@ def data_norm( Args: input (Tensor): The input Tensor. act (str, optional): Activation type, linear|relu|prelu|... Default: None. - epsilon(float, optional): Whether to add small values ​in​to the variance during calculations + epsilon(float, optional): Whether to add small values into the variance during calculations to prevent division by zero. Default: 1e-05. param_attr (ParamAttr, optional): The parameter attribute for Parameter `scale`. Default: None. 
data_layout (str, optional): Specify the data format of the input, and the data format of the output @@ -1243,8 +1243,9 @@ def conv3d( if num_channels % groups != 0: raise ValueError( "The number of input channels must be divisible by Attr(groups). " - "Received: number of channels(%s), groups(%s)." - % (str(num_channels), str(groups)) + "Received: number of channels({}), groups({}).".format( + str(num_channels), str(groups) + ) ) num_filter_channels = num_channels // groups diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py index d6e378396f354f..f257e6ec76da1b 100644 --- a/python/paddle/tensor/creation.py +++ b/python/paddle/tensor/creation.py @@ -2108,11 +2108,9 @@ def assign(x, output=None): if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input): # We only deal with the case where the list is nested one level, convert all scalars into variables, and then use stack to process. It is necessary to ensure the consistency of types. if not all( - [ - x.shape == (1,) - for x in input - if isinstance(x, (Variable, core.eager.Tensor)) - ] + x.shape == (1,) + for x in input + if isinstance(x, (Variable, core.eager.Tensor)) ): raise TypeError( "Unsupport paddle.assign([Variable, Variable...]) with non-scalar variable." diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py index 8848f80949d05c..02f87b9d35e3f7 100755 --- a/python/paddle/tensor/logic.py +++ b/python/paddle/tensor/logic.py @@ -78,8 +78,7 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True): if binary_op and x.dtype != y.dtype: raise ValueError( - "(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s." - % (op_name, x.dtype, y.dtype) + f"(InvalidArgument) The DataType of {op_name} Op's Variable must be consistent, but received {x.dtype} and {y.dtype}." ) if out is None: diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 4949aa21553ad0..73fc4af9fa4d14 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -1851,8 +1851,7 @@ def stack(x, axis=0, name=None): x = [x] else: raise TypeError( - "The type of '%s' in %s must be %s, but received %s" - % ( + "The type of '{}' in {} must be {}, but received {}".format( 'x', 'stack', 'list[Tensor], tuple[Tensor] or TensorArray', diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index 5100b47158626c..91d6aae9dac7fe 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -956,8 +956,7 @@ def multiply(x, y, name=None): else: if x.dtype != y.dtype: raise TypeError( - 'Input tensors must be same type, but received type of x: %s, type of y: %s ' - % (x.dtype, y.dtype) + f'Input tensors must be same type, but received type of x: {x.dtype}, type of y: {y.dtype} ' ) return _elementwise_op(LayerHelper('elementwise_mul', **locals())) @@ -1891,8 +1890,9 @@ def __check_input(x, y): raise ValueError( "After performing an optional transpose, Input X's width should be " "equal to Y's width for multiplication " - "prerequisites. But received X's shape: %s, Y's shape: %s\n" - % (x_shape, y_shape) + "prerequisites. But received X's shape: {}, Y's shape: {}\n".format( + x_shape, y_shape + ) ) if len(y_shape) > 2 and len(x_shape) > 2: @@ -2156,8 +2156,9 @@ def __check_input(x, y): raise ValueError( "After performing an optional transpose, Input X's last dim should be " "equal to Y's last dim for multiplication " - "prerequisites. 
But received X's shape: %s, Y's shape: %s\n" - % (x_shape, y_shape) + "prerequisites. But received X's shape: {}, Y's shape: {}\n".format( + x_shape, y_shape + ) ) __check_input(nx, ny) diff --git a/python/paddle/utils/cpp_extension/cpp_extension.py b/python/paddle/utils/cpp_extension/cpp_extension.py index 60f3a65a7633a7..7836a3e6268f3c 100644 --- a/python/paddle/utils/cpp_extension/cpp_extension.py +++ b/python/paddle/utils/cpp_extension/cpp_extension.py @@ -716,7 +716,7 @@ def _record_op_info(self): for i, extension in enumerate(self.extensions): sources = [os.path.abspath(s) for s in extension.sources] if not self.contain_cuda_file: - self.contain_cuda_file = any([is_cuda_file(s) for s in sources]) + self.contain_cuda_file = any(is_cuda_file(s) for s in sources) op_names = parse_op_name_from(sources) for op_name in op_names: diff --git a/python/paddle/utils/cpp_extension/extension_utils.py b/python/paddle/utils/cpp_extension/extension_utils.py index 1d6fa487c16d55..e561574f8035c1 100644 --- a/python/paddle/utils/cpp_extension/extension_utils.py +++ b/python/paddle/utils/cpp_extension/extension_utils.py @@ -1267,7 +1267,7 @@ def _write_setup_file( ).lstrip() with_cuda = False - if any([is_cuda_file(source) for source in sources]): + if any(is_cuda_file(source) for source in sources): with_cuda = True log_v(f"with_cuda: {with_cuda}", verbose) diff --git a/python/paddle/utils/layers_utils.py b/python/paddle/utils/layers_utils.py index 3d8b55dd2dcf09..f3a2a45b8351c4 100644 --- a/python/paddle/utils/layers_utils.py +++ b/python/paddle/utils/layers_utils.py @@ -300,8 +300,9 @@ def _recursive_assert_same_structure(nest1, nest2, check_types): if type_nest1 != type_nest2: raise TypeError( "The two structures don't have the same sequence type. First " - "structure has type %s, while second structure has type %s." - % (type_nest1, type_nest2) + "structure has type {}, while second structure has type {}.".format( + type_nest1, type_nest2 + ) ) if isinstance(nest1, dict): keys1 = set(nest1.keys()) diff --git a/setup.py b/setup.py index ae8cf524bafba3..3d43df91abfccd 100644 --- a/setup.py +++ b/setup.py @@ -49,8 +49,7 @@ sys.version_info.minor ): raise RuntimeError( - "You set PY_VERSION=%s, but your current python environment is %s, you should keep them consistent!" - % ( + "You set PY_VERSION={}, but your current python environment is {}, you should keep them consistent!".format( os.getenv("PY_VERSION"), str(sys.version_info.major) + '.' 
diff --git a/test/distribution/parameterize.py b/test/distribution/parameterize.py index c9f33f304ecc1a..5b32e871ac0a6f 100644 --- a/test/distribution/parameterize.py +++ b/test/distribution/parameterize.py @@ -218,12 +218,11 @@ def from_decorator(cls, args): if "after * must be" not in str(e): raise raise TypeError( - "Parameters must be tuples, but %r is not (hint: use '(%r, )')" - % (args, args), + f"Parameters must be tuples, but {args!r} is not (hint: use '({args!r}, )')", ) def __repr__(self): - return "param(*%r, **%r)" % self + return "param(*{!r}, **{!r})".format(*self) def to_safe_name(s): diff --git a/test/dygraph_to_static/test_declarative.py b/test/dygraph_to_static/test_declarative.py index ddd8680dc3c104..c254720bb52013 100644 --- a/test/dygraph_to_static/test_declarative.py +++ b/test/dygraph_to_static/test_declarative.py @@ -97,7 +97,7 @@ def test_instance_same_class(self): self.assertNotEqual(net_1.forward, net_2.forward) # convert layer into static progam of net_1 - net_1.forward.concrete_program + net_1.forward.concrete_program # noqa: B018 self.assertTrue(len(net_1.forward.program_cache) == 1) # check no conversion applid with net_2 self.assertTrue(len(net_2.forward.program_cache) == 0) @@ -317,7 +317,7 @@ def test_concrete_program(self): # raise error foo_3 = paddle.jit.to_static(foo_func) with self.assertRaises(ValueError): - foo_3.concrete_program + foo_3.concrete_program # noqa: B018 class TestInputDefaultName(unittest.TestCase): @@ -397,17 +397,17 @@ def test_raise_error(self): with self.assertRaisesRegex( RuntimeError, "only available in dynamic mode" ): - net.forward.concrete_program + net.forward.concrete_program # noqa: B018 with self.assertRaisesRegex( RuntimeError, "only available in dynamic mode" ): - net.forward.inputs + net.forward.inputs # noqa: B018 with self.assertRaisesRegex( RuntimeError, "only available in dynamic mode" ): - net.forward.outputs + net.forward.outputs # noqa: B018 class CallNonForwardFuncNet(paddle.nn.Layer): diff --git a/test/dygraph_to_static/test_tensor_shape.py b/test/dygraph_to_static/test_tensor_shape.py index a8436fd3142027..1e2c73d134ba85 100644 --- a/test/dygraph_to_static/test_tensor_shape.py +++ b/test/dygraph_to_static/test_tensor_shape.py @@ -613,7 +613,7 @@ def test(self): x_spec = paddle.static.InputSpec(shape=[None, 10]) func = paddle.jit.to_static(dyfunc_with_if_2, input_spec=[x_spec]) # Call this function to trigger program translation. 
- func.concrete_program + func.concrete_program # noqa: B018 if __name__ == '__main__': diff --git a/test/legacy_test/eager_op_test.py b/test/legacy_test/eager_op_test.py index b809d371c25503..fe3690a33fe8c1 100644 --- a/test/legacy_test/eager_op_test.py +++ b/test/legacy_test/eager_op_test.py @@ -2228,9 +2228,8 @@ def _assert_is_close( atol=atol, equal_nan=False, err_msg=( - "Operator %s error, %s variable %s (shape: %s, dtype: %s) max gradient diff over limit" - ) - % ( + "Operator {} error, {} variable {} (shape: {}, dtype: {}) max gradient diff over limit" + ).format( self.op_type, msg_prefix, name, diff --git a/test/legacy_test/gradient_checker.py b/test/legacy_test/gradient_checker.py index 68a3e040a095a9..23e69c6b8ae62d 100644 --- a/test/legacy_test/gradient_checker.py +++ b/test/legacy_test/gradient_checker.py @@ -92,7 +92,7 @@ def make_jacobian(x, y_size, np_dtype): ) return jacobians else: - None + pass def _compute_numerical_jacobian(program, x, y, place, scope, delta): @@ -321,10 +321,11 @@ def fail_test(msg): n = numerical[x_idx][y_idx] if not np.allclose(a, n, rtol, atol): msg = ( - 'Jacobian mismatch for output %s ' - 'with respect to input %s on %s,\n' - 'numerical:%s\nanalytical:%s\n' - % (y[y_idx].name, x[x_idx].name, str(place), n, a) + 'Jacobian mismatch for output {} ' + 'with respect to input {} on {},\n' + 'numerical:{}\nanalytical:{}\n'.format( + y[y_idx].name, x[x_idx].name, str(place), n, a + ) ) return fail_test(msg) return True diff --git a/test/legacy_test/prim_op_test.py b/test/legacy_test/prim_op_test.py index 1e3a2d2fe17a94..cca23904a922d2 100644 --- a/test/legacy_test/prim_op_test.py +++ b/test/legacy_test/prim_op_test.py @@ -620,9 +620,8 @@ def check_static_comp(self): # check static forward if len(ret) != len(self.eager_desire): msg = ( - "The static comp forward api out tensor nums is different with eager forward api out tensor nums on %s." - 'when enable_fw_comp is %s, static comp forward api out tensor nums = %s, eager forward api out tensor nums = %s. \n' - % ( + "The static comp forward api out tensor nums is different with eager forward api out tensor nums on {}." + 'when enable_fw_comp is {}, static comp forward api out tensor nums = {}, eager forward api out tensor nums = {}. \n'.format( str(self.place), self.enable_fw_comp, len(ret), @@ -699,9 +698,8 @@ def check_jit_comp(self): # check jit comp forward if len(ret) != len(self.eager_desire): msg = ( - "The jit comp forward api out tensor nums is different with eager forward api out tensor nums on %s." - 'when enable_fw_comp is %s, jit comp forward api out tensor nums = %s, eager forward api out tensor nums = %s. \n' - % ( + "The jit comp forward api out tensor nums is different with eager forward api out tensor nums on {}." + 'when enable_fw_comp is {}, jit comp forward api out tensor nums = {}, eager forward api out tensor nums = {}. \n'.format( str(self.place), self.enable_fw_comp, len(ret), @@ -795,9 +793,8 @@ def check_jit_comp_with_cinn(self): # check jit comp forward if len(ret) != len(self.eager_desire): msg = ( - "The jit comp with cinn forward api out tensor nums is different with eager forward api out tensor nums on %s." - 'when enable_fw_comp is %s, enable_cinn is %s, jit comp forward api out tensor nums = %s, eager forward api out tensor nums = %s. \n' - % ( + "The jit comp with cinn forward api out tensor nums is different with eager forward api out tensor nums on {}." 
+ 'when enable_fw_comp is {}, enable_cinn is {}, jit comp forward api out tensor nums = {}, eager forward api out tensor nums = {}. \n'.format( str(self.place), self.enable_fw_comp, core.is_compiled_with_cinn() and self.enable_cinn, @@ -869,8 +866,8 @@ def check(self): def get_output_dict(self, np_outputs, api_outputs, outputs_sig): assert len(api_outputs) <= len(outputs_sig), ( - "forward api outputs length must be the less than or equal to KernelSignature outputs,but receive %s and %s" - ) % (len(api_outputs), len(outputs_sig)) + "forward api outputs length must be the less than or equal to KernelSignature outputs,but receive {} and {}" + ).format(len(api_outputs), len(outputs_sig)) output_dict = {} for i in range(len(api_outputs)): output_name = outputs_sig[i] @@ -992,9 +989,8 @@ def check_eager_comp(self): # check static forward if len(actual_ret) != len(self.eager_desire): msg = ( - "The eager comp grad out tensor nums is different with eager grad out tensor nums on %s." - 'when enable_rev_comp is %s, eager comp grad api out tensor nums = %s, eager grad out tensor nums = %s. \n' - % ( + "The eager comp grad out tensor nums is different with eager grad out tensor nums on {}." + 'when enable_rev_comp is {}, eager comp grad api out tensor nums = {}, eager grad out tensor nums = {}. \n'.format( str(self.place), self.enable_rev_comp, len(actual_ret), @@ -1098,9 +1094,8 @@ def check_static_comp(self): # check static grad out if len(actual_ret) != len(self.eager_desire): msg = ( - "The static comp grad out tensor nums is different with eager grad out tensor nums on %s." - 'when enable_fw_comp is %s,enable_rev_comp is %s, static comp grad out tensor nums = %s, eager grad out tensor nums = %s. \n' - % ( + "The static comp grad out tensor nums is different with eager grad out tensor nums on {}." + 'when enable_fw_comp is {},enable_rev_comp is {}, static comp grad out tensor nums = {}, eager grad out tensor nums = {}. \n'.format( str(self.place), self.enable_fw_comp, self.enable_rev_comp, @@ -1215,9 +1210,8 @@ def check_jit_comp(self): # check jit comp grad out if len(ret) != len(self.eager_desire): msg = ( - "The jit comp grad out tensor nums is different with eager grad out tensor nums on %s." - 'when enable_fw_comp is %s, enable_rev_comp is %s, jit comp grad out tensor nums = %s, eager grad out tensor nums = %s. \n' - % ( + "The jit comp grad out tensor nums is different with eager grad out tensor nums on {}." + 'when enable_fw_comp is {}, enable_rev_comp is {}, jit comp grad out tensor nums = {}, eager grad out tensor nums = {}. \n'.format( str(self.place), self.enable_fw_comp, self.enable_rev_comp, @@ -1346,9 +1340,8 @@ def check_jit_comp_with_cinn(self): # check jit comp grad out if len(ret) != len(self.eager_desire): msg = ( - "The jit comp with cinn grad out tensor nums is different with eager grad out tensor nums on %s." - 'when enable_fw_comp is %s, enable_rev_comp is %s, enable_cinn is %s, jit comp grad out tensor nums = %s, eager grad out tensor nums = %s. \n' - % ( + "The jit comp with cinn grad out tensor nums is different with eager grad out tensor nums on {}." + 'when enable_fw_comp is {}, enable_rev_comp is {}, enable_cinn is {}, jit comp grad out tensor nums = {}, eager grad out tensor nums = {}. 
\n'.format( str(self.place), self.enable_fw_comp, self.enable_rev_comp, diff --git a/test/legacy_test/test_bilateral_slice_op.py b/test/legacy_test/test_bilateral_slice_op.py index 415ffb789304fc..f62b19a813e875 100644 --- a/test/legacy_test/test_bilateral_slice_op.py +++ b/test/legacy_test/test_bilateral_slice_op.py @@ -171,7 +171,6 @@ def setUp(self): def test_check_output(self): place = paddle.fluid.CUDAPlace(0) self.check_output_with_place(place, atol=1e-5) - self.check_output def test_check_grad(self): place = paddle.fluid.CUDAPlace(0) diff --git a/test/legacy_test/test_callback_early_stop.py b/test/legacy_test/test_callback_early_stop.py index 4b6089bfdf837b..2b6a3ffee6abaf 100644 --- a/test/legacy_test/test_callback_early_stop.py +++ b/test/legacy_test/test_callback_early_stop.py @@ -56,7 +56,7 @@ def tearDown(self): def test_earlystopping(self): paddle.seed(2020) for dynamic in [True, False]: - paddle.enable_static if not dynamic else None + paddle.enable_static() if not dynamic else None device = paddle.set_device('cpu') sample_num = 100 train_dataset = MnistDataset(mode='train', sample_num=sample_num) diff --git a/test/legacy_test/test_dist_base.py b/test/legacy_test/test_dist_base.py index defb8b1e13b013..7347eb54bb5552 100755 --- a/test/legacy_test/test_dist_base.py +++ b/test/legacy_test/test_dist_base.py @@ -1337,7 +1337,6 @@ def _get_gloo_trainer_cmd( "PADDLE_TRAINER_ID": f"{trainer_id}", "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, "PADDLE_CURRENT_ENDPOINT": ep, - "PADDLE_CURRENT_ENDPOINT": ep, "PADDLE_DISTRI_BACKEND": "gloo", "GLOG_v": "2", } diff --git a/test/legacy_test/test_download.py b/test/legacy_test/test_download.py index 62bc1e30defcf4..742c4b2a651902 100644 --- a/test/legacy_test/test_download.py +++ b/test/legacy_test/test_download.py @@ -90,10 +90,8 @@ def test_uncompress_result(self): uncompressed_path = get_path_from_url(url, root_dir='./test_tar') self.assertTrue( all( - [ - os.path.exists(os.path.join("./test_tar", filepath)) - for filepath in uncompressd_res - ] + os.path.exists(os.path.join("./test_tar", filepath)) + for filepath in uncompressd_res ) ) @@ -106,10 +104,8 @@ def test_uncompress_result(self): uncompressed_path = get_path_from_url(url, root_dir='./test_zip') self.assertTrue( all( - [ - os.path.exists(os.path.join("./test_zip", filepath)) - for filepath in uncompressd_res - ] + os.path.exists(os.path.join("./test_zip", filepath)) + for filepath in uncompressd_res ) ) diff --git a/test/legacy_test/test_feed_data_check_shape_type.py b/test/legacy_test/test_feed_data_check_shape_type.py index 5065056b6f5c15..702281d458446d 100644 --- a/test/legacy_test/test_feed_data_check_shape_type.py +++ b/test/legacy_test/test_feed_data_check_shape_type.py @@ -103,9 +103,8 @@ def test(self): self._test_feed_data_shape_mismatch(use_cuda) self.assertEqual( str(shape_mismatch_err.exception), - "The fed Variable %r should have dimensions = %r, " - "shape = %r, but received fed shape %r on each device" - % ( + "The fed Variable {!r} should have dimensions = {!r}, " + "shape = {!r}, but received fed shape {!r} on each device".format( 'data', len(in_shape_tuple), in_shape_tuple, @@ -117,8 +116,8 @@ def test(self): self._test_feed_data_dtype_mismatch(use_cuda) self.assertEqual( str(dtype_mismatch_err.exception), - "The data type of fed Variable %r must be 'int64', but " - "received 'float64'" % ('label'), + "The data type of fed Variable {!r} must be 'int64', but " + "received 'float64'".format('label'), ) def _test_feed_data_dtype_mismatch(self, use_cuda): diff 
--git a/test/legacy_test/test_full_like_op.py b/test/legacy_test/test_full_like_op.py index 63bfb7be24fe08..028b1ad89141a5 100644 --- a/test/legacy_test/test_full_like_op.py +++ b/test/legacy_test/test_full_like_op.py @@ -93,9 +93,6 @@ def test_errors(self): ) output = paddle.full_like(input_data, 2.0) - def test_input_dtype(): - paddle.full_like - self.assertRaises( TypeError, paddle.full_like, diff --git a/test/legacy_test/test_gradient_clip.py b/test/legacy_test/test_gradient_clip.py index dc84d9e70b0285..5b72f03339e31d 100644 --- a/test/legacy_test/test_gradient_clip.py +++ b/test/legacy_test/test_gradient_clip.py @@ -476,8 +476,7 @@ def check_clip_result(self, loss, optimizer): b = global_norm_clip self.assertTrue( np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8), - "gradient clip by global norm has wrong results, expetcd:%f, but received:%f" - % (a, b), + f"gradient clip by global norm has wrong results, expetcd:{a:f}, but received:{b:f}", ) @@ -505,8 +504,7 @@ def check_clip_result(self, loss, optimizer): b = np.sqrt(np.sum(np.power(v, 2))) self.assertTrue( np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8), - "gradient clip by norm has wrong results, expetcd:%f, but received:%f" - % (a, b), + f"gradient clip by norm has wrong results, expetcd:{a:f}, but received:{b:f}", ) @@ -602,8 +600,7 @@ def test_gradient_clip(self): b = global_norm_clip self.assertTrue( np.isclose(a=a, b=b, rtol=1e-3, atol=1e-8), - "gradient clip by global norm has wrong results, expetcd:%f, but received:%f" - % (a, b), + f"gradient clip by global norm has wrong results, expetcd:{a:f}, but received:{b:f}", ) @@ -647,8 +644,7 @@ def test_gradient_clip(self): self.assertTrue( np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8), - "gradient clip by global norm has wrong results, expetcd:%f, but received:%f" - % (a, b), + f"gradient clip by global norm has wrong results, expetcd:{a:f}, but received:{b:f}", ) diff --git a/test/legacy_test/test_require_version.py b/test/legacy_test/test_require_version.py index 09a8b098d47bfc..b989dd8b0a9a68 100644 --- a/test/legacy_test/test_require_version.py +++ b/test/legacy_test/test_require_version.py @@ -23,8 +23,7 @@ class VersionTest(unittest.TestCase): def test_check_output(self): warnings.warn( - "paddle.__version__: %s, fluid_version.full_version: %s, fluid_version.major: %s, fluid_version.minor: %s, fluid_version.patch: %s, fluid_version.rc: %s." 
- % ( + "paddle.__version__: {}, fluid_version.full_version: {}, fluid_version.major: {}, fluid_version.minor: {}, fluid_version.patch: {}, fluid_version.rc: {}.".format( paddle.__version__, fluid_version.full_version, fluid_version.major, diff --git a/test/legacy_test/test_sparse_softmax_op.py b/test/legacy_test/test_sparse_softmax_op.py index 35acb909fe8881..5621abed620c86 100644 --- a/test/legacy_test/test_sparse_softmax_op.py +++ b/test/legacy_test/test_sparse_softmax_op.py @@ -187,8 +187,7 @@ def sparse_softmax(self, sparse, dense_shape, sparse_dim, dim): ) else: print( - "`dim(=%s)` must be smaller than `sparse_dim(=%s) + dense_dim(=%s)`" - % (dim, sparse_dim, dense_dim) + f"`dim(={dim})` must be smaller than `sparse_dim(={sparse_dim}) + dense_dim(={dense_dim})`" ) def check_run(self, dense_shape): diff --git a/test/mkldnn/test_prelu_mkldnn_op.py b/test/mkldnn/test_prelu_mkldnn_op.py index ac08f112de56d8..c2079f0e4d9ce5 100644 --- a/test/mkldnn/test_prelu_mkldnn_op.py +++ b/test/mkldnn/test_prelu_mkldnn_op.py @@ -162,7 +162,6 @@ def calculate_grads(self): dout[:, i], dout[:, i] * self.alpha[i], ) - self.dx elif self.mode == "element": self.dx = np.where(self.x[:] > 0, dout[:], dout[:] * self.alpha) diff --git a/test/quantization/test_imperative_ptq.py b/test/quantization/test_imperative_ptq.py index 959ca8c36d67d9..8ad6fa2832e938 100644 --- a/test/quantization/test_imperative_ptq.py +++ b/test/quantization/test_imperative_ptq.py @@ -253,8 +253,7 @@ def func_ptq(self): self.assertTrue( after_acc_top1 >= self.eval_acc_top1, - msg="The test acc {%f} is less than {%f}." - % (after_acc_top1, self.eval_acc_top1), + msg=f"The test acc {{{after_acc_top1:f}}} is less than {{{self.eval_acc_top1:f}}}.", ) self.assertTrue( infer_acc_top1 >= after_acc_top1, @@ -322,8 +321,7 @@ def func_ptq(self): # The acc of quantized model should be higher than 0.95. self.assertTrue( after_acc_top1 >= self.eval_acc_top1, - msg="The test acc {%f} is less than {%f}." - % (after_acc_top1, self.eval_acc_top1), + msg=f"The test acc {{{after_acc_top1:f}}} is less than {{{self.eval_acc_top1:f}}}.", ) # Check the saved infer_model.The acc of infer model # should not be lower than the one of dygraph model. diff --git a/test/quantization/test_imperative_qat_amp.py b/test/quantization/test_imperative_qat_amp.py index c1b1e005cde510..5b88ef49f9774c 100644 --- a/test/quantization/test_imperative_qat_amp.py +++ b/test/quantization/test_imperative_qat_amp.py @@ -220,13 +220,11 @@ def test_ptq(self): ) _logger.info( - 'fp32_acc_top1: %f, int8_acc_top1: %f' - % (fp32_acc_top1, int8_acc_top1) + f'fp32_acc_top1: {fp32_acc_top1:f}, int8_acc_top1: {int8_acc_top1:f}' ) self.assertTrue( int8_acc_top1 > fp32_acc_top1 - 0.01, - msg='fp32_acc_top1: %f, int8_acc_top1: %f' - % (fp32_acc_top1, int8_acc_top1), + msg=f'fp32_acc_top1: {fp32_acc_top1:f}, int8_acc_top1: {int8_acc_top1:f}', ) input_spec = [ diff --git a/tools/CrossStackProfiler/CspFileReader.py b/tools/CrossStackProfiler/CspFileReader.py index fbf7d7f9c6fb11..28038b5c76d3b1 100755 --- a/tools/CrossStackProfiler/CspFileReader.py +++ b/tools/CrossStackProfiler/CspFileReader.py @@ -112,8 +112,7 @@ def _checkArgsKey(self, key, type): if not isinstance(self._args[key], type): raise TypeError( - "Invalid type of key [%s] in args dict, it should be a %s!" - % (key, type) + f"Invalid type of key [{key}] in args dict, it should be a {type}!" 
) exec(f"self._{key} = self._args[\"{key}\"]") @@ -206,8 +205,9 @@ def _sortBySuffix(elem): ) else: self._logger.info( - "file list in dir [%s] is : %s !" - % (self._dataPath, ', '.join(self._fileList)) + "file list in dir [{}] is : {} !".format( + self._dataPath, ', '.join(self._fileList) + ) ) return self._fileList diff --git a/tools/CrossStackProfiler/NetFileReader.py b/tools/CrossStackProfiler/NetFileReader.py index e17fab30726043..faee1618e6ea7b 100755 --- a/tools/CrossStackProfiler/NetFileReader.py +++ b/tools/CrossStackProfiler/NetFileReader.py @@ -63,8 +63,7 @@ def _parseSingleFile(self, fileNameList, tx_pid, rx_pid, q=None): except Exception: self._logger.warning( - "invalid record [%s] in [%s]. skip it!" - % (line[:-1], fileName) + f"invalid record [{line[:-1]}] in [{fileName}]. skip it!" ) traceInfo["traceEvents"] = traceEventList diff --git a/tools/analysis_build_time.py b/tools/analysis_build_time.py index 1ed9e7aa65d6b6..6ae3ee6bbacc10 100644 --- a/tools/analysis_build_time.py +++ b/tools/analysis_build_time.py @@ -25,12 +25,10 @@ def strToSecond(strTime): def getUsefulBuildTimeFile(filename): os.system( - "grep -Po -- '-o .*' %s | grep ' elapsed' | grep -P -v '0:00.* elapse' > %s/tools/analysis_build_time" - % (filename, root_path) + f"grep -Po -- '-o .*' {filename} | grep ' elapsed' | grep -P -v '0:00.* elapse' > {root_path}/tools/analysis_build_time" ) os.system( - "grep -v -- '-o .*' %s |grep ' elapse' | grep -P -v '0:00.* elapse' >> %s/tools/analysis_build_time" - % (filename, root_path) + f"grep -v -- '-o .*' {filename} |grep ' elapse' | grep -P -v '0:00.* elapse' >> {root_path}/tools/analysis_build_time" ) @@ -48,22 +46,19 @@ def analysisBuildTime(): buildTime = line.split(', ')[1].split('elapsed')[0].strip() secondTime = strToSecond(buildTime) os.system( - "echo %s, %s >> %s/tools/tempbuildTime.txt" - % (buildFile, secondTime, root_path) + f"echo {buildFile}, {secondTime} >> {root_path}/tools/tempbuildTime.txt" ) else: buildTime = line.split(', ')[1].split('elapsed')[0].strip() secondTime = strToSecond(buildTime) if secondTime > 30: os.system( - "echo %s, %s >> %s/tools/tempbuildTime.txt" - % (line, secondTime, root_path) + f"echo {line}, {secondTime} >> {root_path}/tools/tempbuildTime.txt" ) except ValueError: print(line) os.system( - 'sort -n -k 2 -r %s/tools/tempbuildTime.txt > %s/tools/buildTime.txt' - % (root_path, root_path) + f'sort -n -k 2 -r {root_path}/tools/tempbuildTime.txt > {root_path}/tools/buildTime.txt' ) diff --git a/tools/check_op_benchmark_result.py b/tools/check_op_benchmark_result.py index 30d417b2fb7717..8db2e850ae290d 100644 --- a/tools/check_op_benchmark_result.py +++ b/tools/check_op_benchmark_result.py @@ -83,12 +83,12 @@ def check_speed_result(case_name, develop_data, pr_data, pr_result): logging.info("------ OP: %s ------" % case_name) logging.info( - "GPU time change: %s (develop: %.7f -> PR: %.7f)" - % (gpu_time_diff_str, develop_gpu_time, pr_gpu_time) + f"GPU time change: {gpu_time_diff_str} (develop: {develop_gpu_time:.7f} -> PR: {pr_gpu_time:.7f})" ) logging.info( - "Total time change: %.5f%% (develop: %.7f -> PR: %.7f)" - % (total_time_diff * 100, develop_total_time, pr_total_time) + "Total time change: {:.5f}% (develop: {:.7f} -> PR: {:.7f})".format( + total_time_diff * 100, develop_total_time, pr_total_time + ) ) logging.info("backward: %s" % pr_result.get("backward")) logging.info("parameters:") diff --git a/tools/continuous_integration/bisect.py b/tools/continuous_integration/bisect.py index cc1e6cd1b80406..0f949d9c50bd1a 100644 
--- a/tools/continuous_integration/bisect.py +++ b/tools/continuous_integration/bisect.py @@ -81,10 +81,7 @@ def print_arguments(): # List the commits in mainline branch. os.chdir(args.git_dir) ret = subprocess.check_output( - [ - 'git rev-list --first-parent %s...%s' - % (args.good_commit, args.bad_commit) - ], + [f'git rev-list --first-parent {args.good_commit}...{args.bad_commit}'], shell=True, ) sys.stdout.write('commits found:\n%s\n' % ret) @@ -121,8 +118,9 @@ def print_arguments(): # Link error can happen without complete clean up. cmd = ( 'rm -rf * && ' - 'cmake -DWITH_TESTING=ON %s >> %s && make -j%s >> %s' - % (args.git_dir, args.log_file, args.build_parallel, args.log_file) + 'cmake -DWITH_TESTING=ON {} >> {} && make -j{} >> {}'.format( + args.git_dir, args.log_file, args.build_parallel, args.log_file + ) ) sys.stdout.write('cmd: %s\n' % cmd) try: diff --git a/tools/get_pr_ut.py b/tools/get_pr_ut.py index a61814a1193be4..62d1149bf85788 100644 --- a/tools/get_pr_ut.py +++ b/tools/get_pr_ut.py @@ -104,7 +104,7 @@ def __wget_with_retry(self, url): def __urlretrieve(self, url, filename): ix = 1 with_proxy = urllib.request.getproxies() - without_proxy = {'http': '', 'http': ''} + without_proxy = {'http': '', 'https': ''} while ix < 6: if ix // 2 == 0: cur_proxy = urllib.request.ProxyHandler(without_proxy) diff --git a/tools/get_single_test_cov.py b/tools/get_single_test_cov.py index e69283aea63a71..a111ea61c6c898 100644 --- a/tools/get_single_test_cov.py +++ b/tools/get_single_test_cov.py @@ -84,8 +84,7 @@ def analysisFNDAFile(rootPath, test): notrelated_ut_map_file ): print( - "make %s and %s successfully" - % (related_ut_map_file, related_ut_map_file) + f"make {related_ut_map_file} and {related_ut_map_file} successfully" ) else: print(f"make {related_ut_map_file} and {related_ut_map_file} failed") @@ -132,8 +131,7 @@ def analysisFNDAFile(rootPath, test): clazz_filename not in related_file_list ): # xx.pb.cc in RELATED xx.pb.h not in RELATED os.system( - 'echo %s >> %s' - % (clazz_filename, notrelated_ut_map_file) + f'echo {clazz_filename} >> {notrelated_ut_map_file}' ) f.close() diff --git a/tools/get_ut_file_map.py b/tools/get_ut_file_map.py index e91f2a263fc7ab..bf469eab987474 100644 --- a/tools/get_ut_file_map.py +++ b/tools/get_ut_file_map.py @@ -34,8 +34,7 @@ def get_all_paddle_file(rootPath): def get_all_uts(rootPath): all_uts_paddle = '%s/build/all_uts_paddle' % rootPath os.system( - r'cd %s/build && ctest -N -V | grep -Ei "Test[ \t]+#" | grep -oEi "\w+$" > %s' - % (rootPath, all_uts_paddle) + fr'cd {rootPath}/build && ctest -N -V | grep -Ei "Test[ \t]+#" | grep -oEi "\w+$" > {all_uts_paddle}' ) diff --git a/tools/group_case_for_parallel.py b/tools/group_case_for_parallel.py index 8dd5f6f4ca2639..5e8768aa9cc8a4 100644 --- a/tools/group_case_for_parallel.py +++ b/tools/group_case_for_parallel.py @@ -30,8 +30,7 @@ def group_case_for_parallel(rootPath): 'exclusive_card_tests_mem0', ]: os.system( - 'cd %s/tools && wget --no-proxy https://paddle-docker-tar.bj.bcebos.com/pre_test_bak/%s --no-check-certificate' - % (rootPath, filename) + f'cd {rootPath}/tools && wget --no-proxy https://paddle-docker-tar.bj.bcebos.com/pre_test_bak/{filename} --no-check-certificate' ) # get nightly tests diff --git a/tools/handle_h_cu_file.py b/tools/handle_h_cu_file.py index a11c3c3aa81ffc..86458045d3de86 100644 --- a/tools/handle_h_cu_file.py +++ b/tools/handle_h_cu_file.py @@ -62,8 +62,7 @@ def insert_pile_to_h_file(rootPath): os.system(f'echo "#define _PRECISE{func.upper()}_" >> {line}') 
os.system('echo "\n#include \n" >> %s' % line) os.system( - 'echo "__attribute__((constructor)) static void calledFirst%s()\n{" >> %s' - % (func, line) + f'echo "__attribute__((constructor)) static void calledFirst{func}()\n{{" >> {line}' ) os.system( 'echo \' fprintf(stderr,"precise test map fileeee: %%s\\\\n", __FILE__);\n}\' >> %s' @@ -118,8 +117,7 @@ def get_h_cu_file(file_path): ut_path = f"{rootPath}/build/ut_map/{ut}" if os.path.exists(ut_path): os.system( - "cat %s/%s | grep 'precise test map fileeee:'| uniq >> %s/build/ut_map/%s/related_%s.txt" - % (dir_path, filename, rootPath, ut, ut) + f"cat {dir_path}/{filename} | grep 'precise test map fileeee:'| uniq >> {rootPath}/build/ut_map/{ut}/related_{ut}.txt" ) else: print("%s has failed,no has direcotory" % ut) diff --git a/tools/prune_for_jetson.py b/tools/prune_for_jetson.py index 70434c513cad40..5a5b2fcb1c286d 100644 --- a/tools/prune_for_jetson.py +++ b/tools/prune_for_jetson.py @@ -130,10 +130,7 @@ def append_fluid_kernels(): new_content = content.replace(location_str, location_str + append_str) if new_content == content: - print( - "ERROR: can not find \"%s\" in file \"%s\"" - % (location_str, file_name) - ) + print(f"ERROR: can not find \"{location_str}\" in file \"{file_name}\"") return False with open(file_name, 'w', encoding='utf-8') as f: diff --git a/tools/sampcd_processor.py b/tools/sampcd_processor.py index 21697b8a5d7b31..c24dab68c17e00 100644 --- a/tools/sampcd_processor.py +++ b/tools/sampcd_processor.py @@ -264,11 +264,9 @@ def is_required_match(requirestr, cbtitle='not-specified'): return None if all( - [ - k in SAMPLE_CODE_TEST_CAPACITY - for k in requires - if k not in ['skip', 'skiptest'] - ] + k in SAMPLE_CODE_TEST_CAPACITY + for k in requires + if k not in ['skip', 'skiptest'] ): return True From 0742cae0e7398502e576941a836ec01d8243e23d Mon Sep 17 00:00:00 2001 From: SigureMo Date: Thu, 8 Jun 2023 11:27:26 +0800 Subject: [PATCH 2/4] exclude third_party --- .flake8 | 1 + .pre-commit-config.yaml | 3 ++- pyproject.toml | 10 ++++++++-- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.flake8 b/.flake8 index bbfdb55afbeba7..3a4c749f759afa 100644 --- a/.flake8 +++ b/.flake8 @@ -5,6 +5,7 @@ exclude = # Exclude fluid directory ./python/paddle/fluid/**, # Exclude third-party libraries + ./third_party/**, ./python/paddle/utils/gast/**, # Temporarily ignore CINN files, it will fix later ./python/cinn/**, diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0d5d0f89ab58d9..8868924fbf55a4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,7 +4,8 @@ exclude: | patches/.+| paddle/fluid/framework/fleet/heter_ps/cudf/.+| paddle/fluid/distributed/ps/thirdparty/round_robin.h| - python/paddle/utils/gast/.+ + python/paddle/utils/gast/.+| + third_party/.+ )$ repos: # Common hooks diff --git a/pyproject.toml b/pyproject.toml index 486b7218f18b33..5259a735d819bb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,13 +1,18 @@ [tool.black] -exclude = "build" line-length = 80 skip-string-normalization = true +extend-exclude = ''' +( + third_party/.+ # Exclude third_party directory + | build/.+ # Exclude build directory +) +''' [tool.isort] profile = "black" line_length = 80 known_first_party = ["paddle"] -skip = ["build", "__init__.py"] +skip = ["build", "third_party", "__init__.py"] extend_skip_glob = [ # These files do not need to be formatted, # see .flake8 for more details @@ -21,6 +26,7 @@ extend_skip_glob = [ [tool.ruff] exclude = [ "./build", + "third_party", 
"./python/paddle/fluid/**", "./python/paddle/utils/gast/**", # Temporarily ignore CINN files, it will fix later From a80b57f61b2048a9760dc768e6d3c8a41bf28fab Mon Sep 17 00:00:00 2001 From: SigureMo Date: Thu, 8 Jun 2023 11:45:22 +0800 Subject: [PATCH 3/4] bump ruff to 0.0.272 --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8868924fbf55a4..287b02ead8e2b4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -67,8 +67,8 @@ repos: hooks: - id: flake8 args: ["--config=.flake8"] -- repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.271 +- repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.0.272 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix, --no-cache] From 4747e0a9a911b891ce9c846b3872272903fc67df Mon Sep 17 00:00:00 2001 From: SigureMo Date: Thu, 8 Jun 2023 13:27:37 +0800 Subject: [PATCH 4/4] refine config --- .flake8 | 1 - .pre-commit-config.yaml | 5 ++--- test/collective/fleet/test_hdfs1.py | 22 +++++++++++----------- 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/.flake8 b/.flake8 index 3a4c749f759afa..62f8772209809a 100644 --- a/.flake8 +++ b/.flake8 @@ -30,4 +30,3 @@ ignore = per-file-ignores = # These files need tabs for testing. test/dygraph_to_static/test_error.py:E101,W191 - python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py:E101,W191 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 287b02ead8e2b4..437b1dd7c4464f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -33,12 +33,11 @@ repos: name: Tabs remover (Python) files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$ args: [--whitespaces-count, '4'] - # Exclude the fluid directory but keep the fluid/tests directory. + # Exclude the fluid directory. # And exclude some unit test files that require tabs. 
exclude: | (?x)^( - python/paddle/fluid/(?!tests).+| - python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py| + python/paddle/fluid/.+| test/dygraph_to_static/test_error.py )$ - repo: local diff --git a/test/collective/fleet/test_hdfs1.py b/test/collective/fleet/test_hdfs1.py index 0140db8891d43d..418a9ae3a68fa1 100644 --- a/test/collective/fleet/test_hdfs1.py +++ b/test/collective/fleet/test_hdfs1.py @@ -54,17 +54,17 @@ def test_is_dir(self): s = """ java.io.IOException: Input/output error responseErrorMsg : failed to getFileStatus, errorCode: 3, path: /user/PUBLIC_KM_Data/wangxi16/data/serving_model, lparam: d868f6bb6822c621, errorMessage: inner error - at org.apache.hadoop.util.FileSystemUtil.throwException(FileSystemUtil.java:164) - at org.apache.hadoop.util.FileSystemUtil.dealWithResponse(FileSystemUtil.java:118) - at org.apache.hadoop.lite.client.LiteClientImpl.getFileStatus(LiteClientImpl.java:696) - at org.apache.hadoop.fs.LibDFileSystemImpl.getFileStatus(LibDFileSystemImpl.java:297) - at org.apache.hadoop.fs.LiteFileSystem.getFileStatus(LiteFileSystem.java:514) - at org.apache.hadoop.fs.FsShell.test(FsShell.java:1092) - at org.apache.hadoop.fs.FsShell.run(FsShell.java:2285) - at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65) - at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:79) - at org.apache.hadoop.fs.FsShell.main(FsShell.java:2353) - """ # fmt: off, avoid remove tabs in string +\tat org.apache.hadoop.util.FileSystemUtil.throwException(FileSystemUtil.java:164) +\tat org.apache.hadoop.util.FileSystemUtil.dealWithResponse(FileSystemUtil.java:118) +\tat org.apache.hadoop.lite.client.LiteClientImpl.getFileStatus(LiteClientImpl.java:696) +\tat org.apache.hadoop.fs.LibDFileSystemImpl.getFileStatus(LibDFileSystemImpl.java:297) +\tat org.apache.hadoop.fs.LiteFileSystem.getFileStatus(LiteFileSystem.java:514) +\tat org.apache.hadoop.fs.FsShell.test(FsShell.java:1092) +\tat org.apache.hadoop.fs.FsShell.run(FsShell.java:2285) +\tat org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65) +\tat org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:79) +\tat org.apache.hadoop.fs.FsShell.main(FsShell.java:2353) + """ print("split lines:", s.splitlines()) self.assertIsNotNone(fs._test_match(s.splitlines()))
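The final hunk above replaces literal tab characters in the HDFS stack-trace fixture with \t escape sequences, which is why the E101/W191 per-file-ignore for that test can be dropped from .flake8 and the tab-remover exclusion for it can be dropped from .pre-commit-config.yaml. A minimal sketch of the idea, not taken from the patch and using a made-up trace line: the escaped form produces the same runtime string while keeping the source file free of hard tabs.

    # Hedged illustration: an escaped "\t" yields the same string as a hard tab,
    # so a fixture can keep tab-sensitive content without tripping tab checks.
    escaped = "\tat org.example.Foo.bar(Foo.java:1)"    # hypothetical trace line
    hard_tab = chr(9) + "at org.example.Foo.bar(Foo.java:1)"
    assert escaped == hard_tab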
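More broadly, most of the Python edits in this series are two mechanical rewrites that newer ruff releases flag, likely via its pyupgrade and flake8-comprehensions rule families: %-style formatting becomes f-strings or str.format, and a list comprehension passed to all()/any() becomes a plain generator expression. The sketch below is not part of the patch and uses made-up names (paths, root); it only shows that both rewrites preserve behavior.

    import os

    paths = ["a.txt", "b.txt"]    # hypothetical inputs for illustration
    root = "/tmp/test_data"

    # Before: %-formatting plus a list comprehension materialized inside all().
    msg_old = "checking %d files under %s" % (len(paths), root)
    ok_old = all([os.path.exists(os.path.join(root, p)) for p in paths])

    # After: an f-string plus a generator expression, which all() can
    # short-circuit without building an intermediate list.
    msg_new = f"checking {len(paths)} files under {root}"
    ok_new = all(os.path.exists(os.path.join(root, p)) for p in paths)

    assert msg_old == msg_new
    assert ok_old == ok_new

The generator form also avoids allocating a throwaway list, which is presumably why the lint rule prefers it; the output and truth value are identical either way.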