
Commit e6ee162
Merge pull request #4444 from neutrinoceros/flake8-comp
neutrinoceros authored May 17, 2023
2 parents b87c49f + 3d60ee4 commit e6ee162
Showing 85 changed files with 1,140 additions and 1,117 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -14,7 +14,7 @@
[![CI (bleeding edge)](https://github.com/yt-project/yt/actions/workflows/bleeding-edge.yaml/badge.svg)](https://github.com/yt-project/yt/actions/workflows/bleeding-edge.yaml)
[![pre-commit.ci status](https://results.pre-commit.ci/badge/github/yt-project/yt/main.svg)](https://results.pre-commit.ci/latest/github/yt-project/yt/main)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
-[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v1.json)](https://github.com/charliermarsh/ruff)
+[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json)](https://github.com/charliermarsh/ruff)

<!--- [![codecov](https://codecov.io/gh/yt-project/yt/branch/main/graph/badge.svg)](https://codecov.io/gh/yt-project/yt) --->

38 changes: 19 additions & 19 deletions doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -84,7 +84,7 @@
},
"outputs": [],
"source": [
"data = dict(density=(arr, \"g/cm**3\"))\n",
"data = {\"density\": (arr, \"g/cm**3\")}\n",
"bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])\n",
"ds = yt.load_uniform_grid(data, arr.shape, length_unit=\"Mpc\", bbox=bbox, nprocs=64)"
]
@@ -161,12 +161,12 @@
"posx_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
"posy_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
"posz_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
"data = dict(\n",
" density=(np.random.random(size=(64, 64, 64)), \"Msun/kpc**3\"),\n",
" particle_position_x=(posx_arr, \"code_length\"),\n",
" particle_position_y=(posy_arr, \"code_length\"),\n",
" particle_position_z=(posz_arr, \"code_length\"),\n",
")\n",
"data = {\n",
" \"density\": (np.random.random(size=(64, 64, 64)), \"Msun/kpc**3\"),\n",
" \"particle_position_x\": (posx_arr, \"code_length\"),\n",
" \"particle_position_y\": (posy_arr, \"code_length\"),\n",
" \"particle_position_z\": (posz_arr, \"code_length\"),\n",
"}\n",
"bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])\n",
"ds = yt.load_uniform_grid(\n",
" data,\n",
@@ -581,18 +581,18 @@
"outputs": [],
"source": [
"grid_data = [\n",
" dict(\n",
" left_edge=[0.0, 0.0, 0.0],\n",
" right_edge=[1.0, 1.0, 1.0],\n",
" level=0,\n",
" dimensions=[32, 32, 32],\n",
" ),\n",
" dict(\n",
" left_edge=[0.25, 0.25, 0.25],\n",
" right_edge=[0.75, 0.75, 0.75],\n",
" level=1,\n",
" dimensions=[32, 32, 32],\n",
" ),\n",
" {\n",
" \"left_edge\": [0.0, 0.0, 0.0],\n",
" \"right_edge\": [1.0, 1.0, 1.0],\n",
" \"level\": 0,\n",
" \"dimensions\": [32, 32, 32],\n",
" },\n",
" {\n",
" \"left_edge\": [0.25, 0.25, 0.25],\n",
" \"right_edge\": [0.75, 0.75, 0.75],\n",
" \"level\": 1,\n",
" \"dimensions\": [32, 32, 32],\n",
" },\n",
"]"
]
},
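All three notebook hunks above are the same flake8-comprehensions fix (rule C408): a `dict(...)` call built from keyword arguments becomes a dict literal. A minimal sketch of the equivalence, with illustrative names rather than ones from the notebook:

```python
# C408 flags dict() called with keyword arguments...
units_old = dict(density="g/cm**3", temperature="K")

# ...in favor of a literal, the form the hunks above switch to.
# Literals also accept keys that are not valid Python identifiers.
units_new = {"density": "g/cm**3", "temperature": "K"}

assert units_old == units_new  # same contents either way
```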
1 change: 1 addition & 0 deletions pyproject.toml
@@ -289,6 +289,7 @@ select = [
"E",
"F",
"W",
"C4", # flake8-comprehensions
"B", # flake8-bugbear
"G", # flake8-logging-format
"YTT", # flake8-2020
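The new `"C4"` entry enables Ruff's port of the flake8-comprehensions plugin, which is what drives every rewrite in this commit. A sketch of the main patterns it flags, with the preferred forms in comments (rule codes follow flake8-comprehensions; exact messages vary by Ruff version):

```python
d = dict(a=1)                            # C408 -> {"a": 1}
t = tuple()                              # C408 -> ()
chars = [c for c in "abc"]               # C416 -> list("abc")
has_a = any([c == "a" for c in "abc"])   # C419 -> any(c == "a" for c in "abc")
top = list(reversed(sorted([3, 1])))     # C413 -> sorted([3, 1], reverse=True)
```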
2 changes: 1 addition & 1 deletion tests/report_failed_answers.py
@@ -333,7 +333,7 @@ def parse_nose_xml(nose_xml):
"""
missing_answers = set()
failed_answers = collections.defaultdict(lambda: dict())
failed_answers = collections.defaultdict(lambda: {})
missing_errors = ["No such file or directory", "There is no old answer available"]
tree = ET.parse(nose_xml)
testsuite = tree.getroot()
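`lambda: {}` is the literal spelling of the same default factory. Worth noting, though not part of this commit: `defaultdict` accepts any zero-argument callable, so `dict` itself would do. A sketch with illustrative keys:

```python
import collections

# dict is already a zero-argument callable; no lambda is needed.
failed_answers = collections.defaultdict(dict)

failed_answers["test_sloshing"]["frame_0012"] = "answers mismatched"
assert failed_answers["test_sloshing"] == {"frame_0012": "answers mismatched"}
```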
104 changes: 52 additions & 52 deletions yt/config.py
@@ -4,58 +4,58 @@

ytcfg_defaults = {}

ytcfg_defaults["yt"] = dict(
serialize=False,
only_deserialize=False,
time_functions=False,
colored_logs=False,
suppress_stream_logging=False,
stdout_stream_logging=False,
log_level=20,
inline=False,
num_threads=-1,
store_parameter_files=False,
parameter_file_store="parameter_files.csv",
maximum_stored_datasets=500,
skip_dataset_cache=True,
load_field_plugins=False,
plugin_filename="my_plugins.py",
parallel_traceback=False,
pasteboard_repo="",
reconstruct_index=True,
test_storage_dir="/does/not/exist",
test_data_dir="/does/not/exist",
enzo_db="",
notebook_password="",
answer_testing_tolerance=3,
answer_testing_bitwise=False,
gold_standard_filename="gold311",
local_standard_filename="local001",
answer_tests_url="http://answers.yt-project.org/{1}_{2}",
sketchfab_api_key="None",
imagebin_api_key="e1977d9195fe39e",
imagebin_upload_url="https://api.imgur.com/3/image",
imagebin_delete_url="https://api.imgur.com/3/image/{delete_hash}",
curldrop_upload_url="http://use.yt/upload",
thread_field_detection=False,
ignore_invalid_unit_operation_errors=False,
chunk_size=1000,
xray_data_dir="/does/not/exist",
supp_data_dir="/does/not/exist",
default_colormap="cmyt.arbre",
ray_tracing_engine="yt",
internals=dict(
within_testing=False,
within_pytest=False,
parallel=False,
strict_requires=False,
global_parallel_rank=0,
global_parallel_size=1,
topcomm_parallel_rank=0,
topcomm_parallel_size=1,
command_line=False,
),
)
ytcfg_defaults["yt"] = {
"serialize": False,
"only_deserialize": False,
"time_functions": False,
"colored_logs": False,
"suppress_stream_logging": False,
"stdout_stream_logging": False,
"log_level": 20,
"inline": False,
"num_threads": -1,
"store_parameter_files": False,
"parameter_file_store": "parameter_files.csv",
"maximum_stored_datasets": 500,
"skip_dataset_cache": True,
"load_field_plugins": False,
"plugin_filename": "my_plugins.py",
"parallel_traceback": False,
"pasteboard_repo": "",
"reconstruct_index": True,
"test_storage_dir": "/does/not/exist",
"test_data_dir": "/does/not/exist",
"enzo_db": "",
"notebook_password": "",
"answer_testing_tolerance": 3,
"answer_testing_bitwise": False,
"gold_standard_filename": "gold311",
"local_standard_filename": "local001",
"answer_tests_url": "http://answers.yt-project.org/{1}_{2}",
"sketchfab_api_key": "None",
"imagebin_api_key": "e1977d9195fe39e",
"imagebin_upload_url": "https://api.imgur.com/3/image",
"imagebin_delete_url": "https://api.imgur.com/3/image/{delete_hash}",
"curldrop_upload_url": "http://use.yt/upload",
"thread_field_detection": False,
"ignore_invalid_unit_operation_errors": False,
"chunk_size": 1000,
"xray_data_dir": "/does/not/exist",
"supp_data_dir": "/does/not/exist",
"default_colormap": "cmyt.arbre",
"ray_tracing_engine": "yt",
"internals": {
"within_testing": False,
"within_pytest": False,
"parallel": False,
"strict_requires": False,
"global_parallel_rank": 0,
"global_parallel_size": 1,
"topcomm_parallel_rank": 0,
"topcomm_parallel_size": 1,
"command_line": False,
},
}


# For backward compatibility, do not use these vars internally in yt
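Rewriting this large `dict(...)` call as a literal is mostly a readability and consistency change, but literals are also marginally faster, since they skip the `dict` name lookup and call. A quick way to see this (timings are machine-dependent):

```python
import timeit

call = timeit.timeit('dict(serialize=False, log_level=20)', number=1_000_000)
literal = timeit.timeit('{"serialize": False, "log_level": 20}', number=1_000_000)
print(f"dict() call: {call:.2f}s  literal: {literal:.2f}s")
```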
8 changes: 4 additions & 4 deletions yt/data_objects/analyzer_objects.py
@@ -26,10 +26,10 @@ def __repr__(self):

def analysis_task(params=None):
    if params is None:
-        params = tuple()
+        params = ()

    def create_new_class(func):
-        cls = type(func.__name__, (AnalysisTask,), dict(eval=func, _params=params))
+        cls = type(func.__name__, (AnalysisTask,), {"eval": func, "_params": params})
        return cls

    return create_new_class
@@ -67,7 +67,7 @@ class QuantityProxy(AnalysisTask):
    def __repr__(self):
        # Stolen from YTDataContainer.__repr__
        s = f"{self.__class__.__name__}: "
-        s += ", ".join(["%s" % [arg for arg in self.args]])
+        s += ", ".join(["%s" % list(self.args)])
        s += ", ".join(f"{k}={v}" for k, v in self.kwargs.items())
        return s

@@ -103,6 +103,6 @@ def create_quantity_proxy(quantity_object):
    params = args[1:]
    if kwargs is not None:
        params += kwargs
-    dd = dict(_params=params, quantity_name=quantity_object[0])
+    dd = {"_params": params, "quantity_name": quantity_object[0]}
    cls = type(quantity_object[0], (QuantityProxy,), dd)
    return cls
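Both hunks in this file pass a class namespace to the three-argument form of `type()`, where the mapping becomes the new class's attributes. The dict literal makes it explicit that `"eval"` and `"_params"` are attribute names, not keyword arguments. A self-contained sketch of the same pattern (names are illustrative):

```python
def eval_task(self):
    return f"running {type(self).__name__}"

# Equivalent to writing `class MyTask: eval = eval_task` by hand.
MyTask = type("MyTask", (), {"eval": eval_task})

assert MyTask().eval() == "running MyTask"
```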
2 changes: 1 addition & 1 deletion yt/data_objects/construction_data_containers.py
@@ -2516,7 +2516,7 @@ def _export_ply(
            DLE = self.ds.domain_left_edge
            DRE = self.ds.domain_right_edge
            bounds = [(DLE[i], DRE[i]) for i in range(3)]
-        elif any([not all([isinstance(be, YTArray) for be in b]) for b in bounds]):
+        elif any(not all(isinstance(be, YTArray) for be in b) for b in bounds):
            bounds = [
                tuple(
                    be if isinstance(be, YTArray) else self.ds.quan(be, "code_length")
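Dropping the inner brackets (rule C419) is not only cosmetic: `any()` and `all()` can short-circuit on a generator expression, whereas a list comprehension is fully built first. A small illustration:

```python
def is_float(value):
    print(f"checking {value!r}")
    return isinstance(value, float)

bounds = [0.0, "bad", 1.0]

# all() stops at the first failure, so only 0.0 and "bad" are checked;
# with all([...]) every element would have been evaluated up front.
print(all(is_float(b) for b in bounds))  # False
```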
2 changes: 1 addition & 1 deletion yt/data_objects/level_sets/contour_finder.py
@@ -60,7 +60,7 @@ def identify_contours(data_source, field, min_val, max_val, cached_fields=None):
        contour_ids[pg.parent_grid_id].append((sl, ff))
        pbar.update(i + 1)
    pbar.finish()
-    rv = dict()
+    rv = {}
    rv.update(contour_ids)
    # NOTE: Because joins can appear in both a "final join" and a subsequent
    # "join", we can't know for sure how many unique joins there are without
8 changes: 4 additions & 4 deletions yt/data_objects/level_sets/tests/test_clump_finding.py
@@ -124,8 +124,8 @@ def test_clump_tree_save():
    ds2 = load(fn)

    # compare clumps in the tree
-    t1 = [c for c in master_clump]
-    t2 = [c for c in ds2.tree]
+    t1 = list(master_clump)
+    t2 = list(ds2.tree)
    mt1 = ds.arr([c.info["cell_mass"][1] for c in t1])
    mt2 = ds2.arr([c["clump", "cell_mass"] for c in t2])
    it1 = np.array(np.argsort(mt1).astype(int))
@@ -139,8 +139,8 @@ def test_clump_tree_save():
    assert_array_equal(ct1["all", "particle_mass"], ct2["all", "particle_mass"])

    # compare leaf clumps
-    c1 = [c for c in leaf_clumps]
-    c2 = [c for c in ds2.leaves]
+    c1 = list(leaf_clumps)
+    c2 = list(ds2.leaves)
    mc1 = ds.arr([c.info["cell_mass"][1] for c in c1])
    mc2 = ds2.arr([c["clump", "cell_mass"] for c in c2])
    ic1 = np.array(np.argsort(mc1).astype(int))
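Rule C416: a comprehension that merely copies its iterable is a verbose `list()` call. The rewrite states the intent directly, materializing each clump iterator into a list. A sketch with a stand-in iterator:

```python
clump_ids = iter([0, 1, 2])  # stand-in for iterating a clump tree

# [c for c in x] copies element by element; list(x) says the same thing.
t1 = list(clump_ids)
assert t1 == [0, 1, 2]
```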
4 changes: 2 additions & 2 deletions yt/data_objects/particle_filters.py
@@ -52,11 +52,11 @@ def available(self, field_list):
        return all((self.filtered_type, field) in field_list for field in self.requires)

    def missing(self, field_list):
-        return list(
+        return [
            (self.filtered_type, field)
            for field in self.requires
            if (self.filtered_type, field) not in field_list
-        )
+        ]

    def wrap_func(self, field_name, old_fi):
        new_fi = copy.copy(old_fi)
6 changes: 3 additions & 3 deletions yt/data_objects/particle_trajectories.py
@@ -337,7 +337,7 @@ def trajectory_from_index(self, index):
        if not np.any(mask):
            print("The particle index %d is not in the list!" % (index))
            raise IndexError
-        fields = [field for field in sorted(self.field_data.keys())]
+        fields = sorted(self.field_data.keys())
        traj = {}
        traj["particle_time"] = self.times
        traj["particle_index"] = index
@@ -362,7 +362,7 @@ def write_out(self, filename_base):
>>> trajs = ParticleTrajectories(my_fns, indices)
>>> trajs.write_out("orbit_trajectory")
"""
fields = [field for field in sorted(self.field_data.keys())]
fields = sorted(self.field_data.keys())
num_fields = len(fields)
first_str = "# particle_time\t" + "\t".join(fields) + "\n"
template_str = "%g\t" * num_fields + "%g\n"
@@ -402,6 +402,6 @@ def write_out_h5(self, filename):
fid.create_dataset("particle_indices", dtype=np.int64, data=self.indices)
fid.close()
self.times.write_hdf5(filename, dataset_name="particle_times")
fields = [field for field in sorted(self.field_data.keys())]
fields = sorted(self.field_data.keys())
for field in fields:
self[field].write_hdf5(filename, dataset_name=f"{field}")
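In all three hunks the comprehension around `sorted()` was redundant: `sorted()` already returns a new list. Iterating a dict yields its keys, so the `.keys()` call is optional as well. A sketch:

```python
field_data = {"particle_position_x": [], "particle_mass": []}

fields = sorted(field_data.keys())
assert fields == ["particle_mass", "particle_position_x"]
assert fields == sorted(field_data)  # iterating a dict yields its keys
```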
6 changes: 3 additions & 3 deletions yt/data_objects/profiles.py
@@ -1395,7 +1395,7 @@ def create_profile(

            if isinstance(field_ex[0], tuple):
                field_ex = [data_source.ds.quan(*f) for f in field_ex]
-            if any([exi is None for exi in field_ex]):
+            if any(exi is None for exi in field_ex):
                try:
                    ds_extrema = data_source.quantities.extrema(bin_field)
                except AttributeError:
@@ -1462,7 +1462,7 @@ def create_profile(
if mi <= 0 and l:
raise YTIllDefinedBounds(mi, ma)
args += [f, n, mi, ma, l]
kwargs = dict(weight_field=weight_field)
kwargs = {"weight_field": weight_field}
if cls is ParticleProfile:
kwargs["deposition"] = deposition
if override_bins is not None:
@@ -1472,7 +1472,7 @@ def create_profile(
    obj.accumulation = accumulation
    obj.fractional = fractional
    if fields is not None:
-        obj.add_fields([field for field in fields])
+        obj.add_fields(list(fields))
    for field in fields:
        if fractional:
            obj.field_data[field] /= obj.field_data[field].sum()
2 changes: 1 addition & 1 deletion yt/data_objects/tests/test_ellipsoid.py
@@ -36,7 +36,7 @@ def test_ellipsoid():
    ABC[:, 0] = 0.1
    for i in range(12):
        for c in cs:
-            A, B, C = reversed(sorted(ABC[:, i]))
+            A, B, C = sorted(ABC[:, i], reverse=True)
            A = max(A, min_dx[0])
            B = max(B, min_dx[1])
            C = max(C, min_dx[2])
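Rule C413: sorting ascending and then walking the result backwards is expressed in one call with `sorted(..., reverse=True)`. The tuple unpacking works either way, since both a list and a `reversed` iterator can be unpacked. A sketch:

```python
axes = [0.1, 0.5, 0.3]

# reversed(sorted(axes)) -> sorted(axes, reverse=True)
a, b, c = sorted(axes, reverse=True)
assert (a, b, c) == (0.5, 0.3, 0.1)
```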
6 changes: 3 additions & 3 deletions yt/data_objects/tests/test_exclude_functions.py
@@ -30,7 +30,7 @@ def test_exclude_below():
def test_exclude_nan():
    test_array = np.nan * np.ones((10, 10, 10))
    test_array[1, 1, :] = 1
-    data = dict(density=test_array)
+    data = {"density": test_array}
    ds = load_uniform_grid(data, test_array.shape, length_unit="cm", nprocs=1)
    ad = ds.all_data()
    no_nan_ds = ad.exclude_nan(("gas", "density"))
@@ -41,7 +41,7 @@ def test_equal():
    test_array = np.ones((10, 10, 10))
    test_array[1, 1, :] = 2.0
    test_array[2, 1, :] = 3.0
-    data = dict(density=test_array)
+    data = {"density": test_array}
    ds = load_uniform_grid(data, test_array.shape, length_unit="cm", nprocs=1)
    ad = ds.all_data()
    no_ones = ad.exclude_equal(("gas", "density"), 1.0)
@@ -54,7 +54,7 @@ def test_inside_outside():
    test_array = np.ones((10, 10, 10))
    test_array[1, 1, :] = 2.0
    test_array[2, 1, :] = 3.0
-    data = dict(density=test_array)
+    data = {"density": test_array}
    ds = load_uniform_grid(data, test_array.shape, length_unit="cm", nprocs=1)
    ad = ds.all_data()

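Each test feeds an in-memory array to `load_uniform_grid` via a dict literal keyed by field name. A self-contained sketch of that loading pattern, assuming yt and NumPy are installed (field and unit choices mirror the tests):

```python
import numpy as np
import yt

test_array = np.ones((10, 10, 10))
data = {"density": test_array}  # field name -> array

ds = yt.load_uniform_grid(data, test_array.shape, length_unit="cm", nprocs=1)
ad = ds.all_data()
print(ad["gas", "density"].shape)  # one value per cell: (1000,)
```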
24 changes: 12 additions & 12 deletions yt/data_objects/tests/test_refinement.py
@@ -12,18 +12,18 @@ def setup_fake_refby():
    n3 = 2

    grid_data = [
-        dict(
-            left_edge=[0.0, 0.0, 0.0],
-            right_edge=[1.0, np.pi, np.pi * 2.0],
-            level=0,
-            dimensions=np.array([n1, n2, n3]),
-        ),
-        dict(
-            left_edge=[0.0, 0.0, 0.0],
-            right_edge=[0.5, np.pi, np.pi * 2.0],
-            level=1,
-            dimensions=refine_by * [n1 / 2.0, n2, n3],
-        ),
+        {
+            "left_edge": [0.0, 0.0, 0.0],
+            "right_edge": [1.0, np.pi, np.pi * 2.0],
+            "level": 0,
+            "dimensions": np.array([n1, n2, n3]),
+        },
+        {
+            "left_edge": [0.0, 0.0, 0.0],
+            "right_edge": [0.5, np.pi, np.pi * 2.0],
+            "level": 1,
+            "dimensions": refine_by * [n1 / 2.0, n2, n3],
+        },
    ]

    for g in grid_data:
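For context, `grid_data` dictionaries of this shape are what `yt.load_amr_grids` consumes: one dict per grid patch carrying edges, level, dimensions, and, in real use, field arrays. A hedged sketch of loading such a two-level hierarchy (field values and shapes are illustrative, not from the test):

```python
import numpy as np
import yt

grid_data = [
    {
        "left_edge": [0.0, 0.0, 0.0],
        "right_edge": [1.0, 1.0, 1.0],
        "level": 0,
        "dimensions": [8, 8, 8],
        "density": (np.ones((8, 8, 8)), "g/cm**3"),
    },
    {
        "left_edge": [0.25, 0.25, 0.25],
        "right_edge": [0.75, 0.75, 0.75],
        "level": 1,
        "dimensions": [8, 8, 8],  # half the domain at refine_by=2
        "density": (np.full((8, 8, 8), 2.0), "g/cm**3"),
    },
]

ds = yt.load_amr_grids(grid_data, [8, 8, 8])
print(ds.domain_dimensions)  # [8 8 8]
```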
(The remaining 70 of the 85 changed files are not shown here.)
