bump ruff to 0.0.272 and update config #54449

Merged 4 commits on Jun 9, 2023
24 changes: 7 additions & 17 deletions .flake8
@@ -1,13 +1,15 @@
[flake8]
select = C,E,F,W
select = C,E,W
exclude =
./build,
# A trick to exclude fluid/ but keep fluid/tests/, see more at
# https://github.com/PaddlePaddle/Paddle/pull/46290#discussion_r976392010
./python/paddle/fluid/[!t]**,
./python/paddle/fluid/tra**,
# Exclude fluid directory
./python/paddle/fluid/**,
# Exclude third-party libraries
./third_party/**,
./python/paddle/utils/gast/**,
# Temporarily ignore CINN files, it will fix later
./python/cinn/**,
./test/cinn/**,
ignore =
# Whitespace before ‘,’, ‘;’, or ‘:’, it is not compatible with black
E203,
@@ -23,20 +25,8 @@ ignore =
E731,
# Do not use variables named ‘l’, ‘O’, or ‘I’
E741,
# `name` may be undefined, or defined from star imports: `module`
F405,
# Local variable name is assigned to but never used
F841,
# Line break before binary operator, it is not compatible with black
W503
per-file-ignores =
# These files need tabs for testing.
test/dygraph_to_static/test_error.py:E101,W191
python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py:E101,W191
# Ignore unused imports in __init__.py
__init__.py: F401
# Ignore undefined variables in CMake config and some dygraph_to_static tests
.cmake-format.py: F821
test/dygraph_to_static/test_loop.py: F821
test/dygraph_to_static/test_closure_analysis.py: F821
python/paddle/static/amp/decorator.py: F811
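
With this change, `.flake8` keeps only the pycodestyle (`E`/`W`) and McCabe (`C`) checks, and the Pyflakes (`F`) rules move to ruff: `select` gains a blanket `"F"` entry in `pyproject.toml` below, with `F405`/`F841` re-ignored there. A minimal sketch of what the migrated rules catch; the file and names are hypothetical, not part of this PR:

```python
# example.py -- hypothetical snippet; comments name the Pyflakes rule ruff now reports
import os  # F401: `os` imported but unused


def check(values):
    total = sum(values)  # F841: local variable `total` is assigned to but never used
    return undefined_name  # F821: undefined name `undefined_name`
```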
12 changes: 6 additions & 6 deletions .pre-commit-config.yaml
@@ -4,7 +4,8 @@ exclude: |
patches/.+|
paddle/fluid/framework/fleet/heter_ps/cudf/.+|
paddle/fluid/distributed/ps/thirdparty/round_robin.h|
python/paddle/utils/gast/.+
python/paddle/utils/gast/.+|
third_party/.+
)$
repos:
# Common hooks
@@ -32,12 +33,11 @@ repos:
name: Tabs remover (Python)
files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$
args: [--whitespaces-count, '4']
# Exclude the fluid directory but keep the fluid/tests directory.
# Exclude the fluid directory.
# And exclude some unit test files that require tabs.
exclude: |
(?x)^(
python/paddle/fluid/(?!tests).+|
python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py|
python/paddle/fluid/.+|
test/dygraph_to_static/test_error.py
)$
- repo: local
@@ -66,8 +66,8 @@ repos:
hooks:
- id: flake8
args: ["--config=.flake8"]
- repo: https://github.com/charliermarsh/ruff-pre-commit
rev: v0.0.254
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.0.272
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix, --no-cache]
14 changes: 6 additions & 8 deletions paddle/phi/api/yaml/generator/tensor_operants_gen.py
@@ -493,10 +493,9 @@ def get_declare_args_without_first_tensor(self, inplace_flag=False):
)
first_input_type = " ".join(declare_args[0].split(" ")[:-1])
# NOTE(HongyuJia): Do not consider "const paddle::optional<Tensor>&"
assert first_input_type == "const Tensor&", (
"Error! The first argument of Tensor Api %s must be Tensor, but received %s"
% (func_name, first_input_type)
)
assert (
first_input_type == "const Tensor&"
), f"Error! The first argument of Tensor Api {func_name} must be Tensor, but received {first_input_type}"
for name in self.attrs['names']:
default_value = ''
if self.attrs['attr_info'][name][1] is not None:
@@ -515,10 +514,9 @@ def get_define_args_without_first_tensor(self, inplace_flag=False):
)
first_input_type = " ".join(define_args[0].split(" ")[:-1])
# NOTE(HongyuJia): Do not consider "const paddle::optional<Tensor>&"
assert first_input_type == "const Tensor&", (
"Error! The first argument of Tensor Api %s must be Tensor, but received %s"
% (func_name, first_input_type)
)
assert (
first_input_type == "const Tensor&"
), f"Error! The first argument of Tensor Api {func_name} must be Tensor, but received {first_input_type}"
for name in self.attrs['names']:
define_args.append(self.attrs['attr_info'][name][0] + ' ' + name)
# remove first Tensor argument
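
Both hunks above replace `%`-interpolation in the assert messages with f-strings (per ruff's pyupgrade rules), after which black wraps the statement by parenthesizing the condition rather than the message. A self-contained sketch of the before/after shape, using placeholder values:

```python
# Placeholder values standing in for the generator's variables.
func_name, first_input_type = "add", "const Tensor&"

# Before: %-interpolation, with the message parenthesized for line wrapping.
assert first_input_type == "const Tensor&", (
    "Error! The first argument of Tensor Api %s must be Tensor, but received %s"
    % (func_name, first_input_type)
)

# After: f-string message; black parenthesizes the condition instead.
assert (
    first_input_type == "const Tensor&"
), f"Error! The first argument of Tensor Api {func_name} must be Tensor, but received {first_input_type}"
```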
44 changes: 27 additions & 17 deletions pyproject.toml
@@ -1,32 +1,42 @@
[tool.black]
exclude = "build"
line-length = 80
skip-string-normalization = true
extend-exclude = '''
(
third_party/.+ # Exclude third_party directory
| build/.+ # Exclude build directory
)
'''

[tool.isort]
profile = "black"
line_length = 80
known_first_party = ["paddle"]
skip = ["build", "__init__.py"]
skip = ["build", "third_party", "__init__.py"]
extend_skip_glob = [
# These files do not need to be formatted,
# see .flake8 for more details
"python/paddle/fluid/[!t]**",
"python/paddle/fluid/tra**",
"python/paddle/fluid/**",
"python/paddle/utils/gast/**",
# Temporarily ignore CINN files, it will fix later
"python/cinn/**",
"test/cinn/**",
]

[tool.ruff]
exclude = [
"./build",
"./python/paddle/fluid/[!t]**",
"./python/paddle/fluid/tra**",
"third_party",
"./python/paddle/fluid/**",
"./python/paddle/utils/gast/**",
# Temporarily ignore CINN files, it will fix later
"python/cinn/**",
"test/cinn/**",
]
target-version = "py37"
select = [
# Pyflakes
"F401",
"F",

# Comprehensions
"C4",
@@ -60,24 +70,20 @@ select = [
"B032",

# Pylint
"PLE",
"PLC0414",
"PLC3002",
"PLE0100",
"PLE0101",
"PLE0604",
"PLE0605",
"PLE1142",
"PLE1205",
"PLE1206",
"PLE1307",
"PLE2502",
"PLR0206",
"PLR0402",
]
unfixable = [
"NPY001"
]
ignore = [
# `name` may be undefined, or defined from star imports: `module`
"F405",
# Local variable name is assigned to but never used
"F841",
# It not met the "Explicit is better than implicit" rule
"UP015",
# It will cause the performance regression on python3.10
@@ -87,9 +93,13 @@ ignore = [
[tool.ruff.per-file-ignores]
# Ignore unused imports in __init__.py
"__init__.py" = ["F401"]
# Ignore undefined variables in CMake config and some dygraph_to_static tests
".cmake-format.py" = ["F821"]
"test/dygraph_to_static/test_closure_analysis.py" = ["F821"]
"python/paddle/static/amp/decorator.py" = ["F821"]
# Ignore version check in setup.py
"setup.py" = ["UP036"]
# Ignore unnecessary comprehension in dy2st unittest test_loop
"test/dygraph_to_static/test_loop.py" = ["C416"]
"test/dygraph_to_static/test_loop.py" = ["C416", "F821"]
# Ignore unnecessary lambda in dy2st unittest test_lambda
"test/dygraph_to_static/test_lambda.py" = ["PLC3002"]
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/static/helper.py
@@ -242,7 +242,7 @@ def build_program(self, mode):

# NOTE(dev): Because @to_static is a Lazy mechanism, so we explicitly call this to trigger
# generating Program IR immediately.
getattr(self.proxy_layer, func_name).concrete_program
getattr(self.proxy_layer, func_name).concrete_program # noqa: B018

self._build_startup_program()

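
The `# noqa: B018` is needed because flake8-bugbear's B018 rule flags statements whose result is discarded as "useless expressions", and here the bare attribute access is intentional: it has the side effect of triggering `@to_static`'s lazy Program IR generation. A hypothetical sketch of the case B018 normally catches:

```python
class Config:
    """Hypothetical class; `value` is a plain attribute with no side effects."""

    value = 42


cfg = Config()
cfg.value  # B018: useless expression -- the value is computed and thrown away
_ = cfg.value  # assigning it (or removing the line) makes the intent explicit
```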
15 changes: 6 additions & 9 deletions python/paddle/distributed/fleet/base/topology.py
@@ -204,15 +204,12 @@ def __init__(self, topology):
self._dp_degree,
)
)
debug_str += (
", mp_group: %s, sharding_group: %s, pp_group: %s, dp_group: %s, check/clip group: %s"
% (
self._mp_group,
self._sharding_group,
self._pp_group,
self._dp_group,
self._check_group,
)
debug_str += ", mp_group: {}, sharding_group: {}, pp_group: {}, dp_group: {}, check/clip group: {}".format(
self._mp_group,
self._sharding_group,
self._pp_group,
self._dp_group,
self._check_group,
)
logger.info(debug_str)

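
Note that the fix converts this message to `str.format` rather than an f-string, likely because interpolating the attributes inline would push an f-string well past the 80-character line limit, while a `.format` call can wrap its arguments freely. A small sketch of the pattern with placeholder values:

```python
# Placeholder values standing in for the communication-group attributes.
mp_group, dp_group = [0, 1], [0, 2]

# Before: %-interpolation.
msg = ", mp_group: %s, dp_group: %s" % (mp_group, dp_group)

# After: str.format keeps the template one literal and wraps the arguments.
msg = ", mp_group: {}, dp_group: {}".format(mp_group, dp_group)
```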
2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/base/util_factory.py
@@ -583,7 +583,7 @@ def check_not_expected_ops(prog, not_expected_op_types):
global_block._remove_op(index)

# if fetch_list have lod tensor
return_numpy = all([v.lod_level == 0 for v in fetch_list])
return_numpy = all(v.lod_level == 0 for v in fetch_list)

# try dump fetch_targets
feed_tensors = []
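
Dropping the list brackets inside `all()` (ruff's C4 comprehension rules) is more than style: a generator lets `all()` short-circuit at the first falsy element instead of materializing every comparison first. A sketch with a hypothetical stand-in for the fetch-list variables:

```python
class Var:
    """Hypothetical stand-in; only `lod_level` matters for this check."""

    def __init__(self, lod_level):
        self.lod_level = lod_level


fetch_list = [Var(0), Var(1)] + [Var(0)] * 1_000_000

# Before: builds a million-element list of booleans, then scans it.
return_numpy = all([v.lod_level == 0 for v in fetch_list])

# After: the generator stops at the second element (lod_level == 1).
return_numpy = all(v.lod_level == 0 for v in fetch_list)
```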
@@ -366,8 +366,7 @@ def _gen_str(self, line):
)
if name != self._proto_info[index][0]:
raise ValueError(
"the field name of two given line are not match: require<%s>, get<%s>."
% (self._proto_info[index][0], name)
f"the field name of two given line are not match: require<{self._proto_info[index][0]}>, get<{name}>."
)
if output:
output += " "
17 changes: 9 additions & 8 deletions python/paddle/distributed/fleet/dataset/dataset.py
@@ -307,8 +307,7 @@ def _check_use_var_with_data_generator(
data_gen_len = len(user_parsed_line)
if var_len != data_gen_len:
raise ValueError(
"var length mismatch error: var_list = %s vs data_generator = %s"
% (var_len, data_gen_len)
f"var length mismatch error: var_list = {var_len} vs data_generator = {data_gen_len}"
)

for i, ele in enumerate(user_parsed_line):
@@ -324,21 +323,23 @@ def _check_use_var_with_data_generator(
isinstance(ele, float) for ele in ele[1]
):
raise TypeError(
"var dtype mismatch error: var name = %s, var type in var_list = %s, while var in data_generator contains non-float value, which is %s \n"
"var dtype mismatch error: var name = {}, var type in var_list = {}, while var in data_generator contains non-float value, which is {} \n"
"Please check if order of var_list and data_generator are aligned. \n"
"Please check if var's type in data_generator is correct."
% (ele[0], "float", ele[1])
"Please check if var's type in data_generator is correct.".format(
ele[0], "float", ele[1]
)
)

if (
var_list[i].dtype == core.VarDesc.VarType.INT64
or var_list[i].dtype == core.VarDesc.VarType.INT32
) and not all(isinstance(ele, int) for ele in ele[1]):
raise TypeError(
"var dtype mismatch error: var name = %s, var type in var_list = %s, while var in data_generator contains non-int value, which is %s \n"
"var dtype mismatch error: var name = {}, var type in var_list = {}, while var in data_generator contains non-int value, which is {} \n"
"Please check if order of var_list and data_generator are aligned. \n"
"Please check if var's type in data_generator is correct."
% (ele[0], "int", ele[1])
"Please check if var's type in data_generator is correct.".format(
ele[0], "int", ele[1]
)
)

else:
12 changes: 6 additions & 6 deletions python/paddle/distributed/fleet/launch_utils.py
@@ -695,8 +695,9 @@ def get_gpus(gpus):
for x in gpus.split(','):
assert x in cuda_visible_devices_list, (
"Can't find "
"your gpus %s in CUDA_VISIBLE_DEVICES[%s]."
% (x, cuda_visible_devices)
"your gpus {} in CUDA_VISIBLE_DEVICES[{}].".format(
x, cuda_visible_devices
)
)
res_gpus = [
cuda_visible_devices_list.index(x.strip())
@@ -1485,10 +1486,9 @@ def get_role_endpoints(self, args):
else:
self.current_node_ip = pod_ip
if not self.distribute_mode == DistributeMode.PS_HETER:
assert self.current_node_ip in self.node_ips, (
"Can't find your local ip {%s} in args.servers and args.workers ips: {%s}"
% (self.current_node_ip, self.node_ips)
)
assert (
self.current_node_ip in self.node_ips
), f"Can't find your local ip {{{self.current_node_ip}}} in args.servers and args.workers ips: {{{self.node_ips}}}"
if self.current_node_ip in self.node_ips:
self.node_rank = self.node_ips.index(self.current_node_ip)
logger.debug(
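
The converted assert keeps the literal curly braces from the original message, so the f-string doubles them: `{{` and `}}` emit literal braces around the interpolated value. A small sketch with a placeholder address:

```python
node_ip = "10.0.0.5"  # placeholder value

# Triple braces: `{{` -> literal `{`, `{node_ip}` interpolates, `}}` -> literal `}`.
print(f"Can't find your local ip {{{node_ip}}} in args.servers")
# -> Can't find your local ip {10.0.0.5} in args.servers
```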
@@ -69,9 +69,8 @@ def initialize_p2p_groups(
) = _hcg.get_p2p_groups()

debug_str = (
"P2pInfo: send_next_group: %s, send_prev_group: %s, "
"recv_next_group: %s, recv_prev_group: %s"
% (
"P2pInfo: send_next_group: {}, send_prev_group: {}, "
"recv_next_group: {}, recv_prev_group: {}".format(
repr(send_next_group),
repr(send_prev_group),
repr(recv_next_group),
@@ -79,10 +79,8 @@ def __init__(
else sharding_optimizer
)
assert all(
[
isinstance(opt, GroupShardedOptimizerStage2)
for opt in self._sharding_optimizers
]
isinstance(opt, GroupShardedOptimizerStage2)
for opt in self._sharding_optimizers
), "Please use GroupShardedOptimizerStage2 optimizer"
self._sync_buffers = sync_buffers
self._auto_refresh_trainable = auto_refresh_trainable
@@ -124,7 +124,7 @@ def add_rank_params(self, trainable_params, param2align, convert_gpu=True):
"""

assert all(
[id(param) not in self._param_ids for param in trainable_params]
id(param) not in self._param_ids for param in trainable_params
), "The same param cannot be checked in twice"
assert self.buffer is not None

10 changes: 3 additions & 7 deletions python/paddle/distributed/metric/metrics.py
@@ -154,16 +154,12 @@ def print_metric(metric_ptr, name):
"""
if name.find("wuauc") != -1:
metric = metric_ptr.get_wuauc_metric_msg(name)
monitor_msg = (
"%s: User Count=%.0f INS Count=%.0f UAUC=%.6f WUAUC=%.6f "
% (name, metric[0], metric[1], metric[4], metric[5])
)
monitor_msg = f"{name}: User Count={metric[0]:.0f} INS Count={metric[1]:.0f} UAUC={metric[4]:.6f} WUAUC={metric[5]:.6f} "
else:
metric = metric_ptr.get_metric_msg(name)
monitor_msg = (
"%s: AUC=%.6f BUCKET_ERROR=%.6f MAE=%.6f RMSE=%.6f "
"Actual CTR=%.6f Predicted CTR=%.6f COPC=%.6f INS Count=%.0f"
% (
"{}: AUC={:.6f} BUCKET_ERROR={:.6f} MAE={:.6f} RMSE={:.6f} "
"Actual CTR={:.6f} Predicted CTR={:.6f} COPC={:.6f} INS Count={:.0f}".format(
name,
metric[0],
metric[1],
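
The printf precision specifiers carry over unchanged into the new syntax: `%.6f` becomes `{:.6f}` (or `{value:.6f}` in an f-string) and `%.0f` becomes `{:.0f}`. A quick sketch with hypothetical metric values:

```python
name, auc = "ctr_job", 0.8315792  # hypothetical metric values

old = "%s: AUC=%.6f" % (name, auc)  # printf style
new = f"{name}: AUC={auc:.6f}"      # f-string with the same format spec

assert old == new  # both render 'ctr_job: AUC=0.831579'
```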
3 changes: 1 addition & 2 deletions python/paddle/distributed/parallel.py
@@ -425,8 +425,7 @@ def init_reducer(self):
params_set.add(param)
if not isinstance(param, self.var_dtype):
raise TypeError(
"The data type of '%s' must be '%s'"
% (param.name, self.var_dtype)
f"The data type of '{param.name}' must be '{self.var_dtype}'"
)
if param.trainable:
layers_param.append((sublayer, param))
@@ -55,7 +55,7 @@ def reserved_vars(self):
return self._reserved_vars

def is_recompute(self):
return any([is_recompute_op(op) for op in self.ops])
return any(is_recompute_op(op) for op in self.ops)

def build_states(self):
for i, op in enumerate(self.ops):
2 changes: 1 addition & 1 deletion python/paddle/distributed/passes/fuse_all_reduce.py
@@ -177,7 +177,7 @@ def is_valid_allreduce_op(op):
if in_var.type != core.VarDesc.VarType.LOD_TENSOR:
return False
shape = in_var.shape
if any([s <= 0 for s in shape]):
if any(s <= 0 for s in shape):
return False
return True
