Skip to content

Commit

Permalink
Remove indexpath message
Browse files Browse the repository at this point in the history
  • Loading branch information
b8raoult committed Dec 22, 2021
1 parent af3e3ba commit d282a4b
Show file tree
Hide file tree
Showing 6 changed files with 43 additions and 22 deletions.
16 changes: 10 additions & 6 deletions cfgrib/dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -363,7 +363,10 @@ def __getitem__(self, item):


def build_geography_coordinates(
first: abc.Field, encode_cf: T.Sequence[str], errors: str, log: logging.Logger = LOG,
first: abc.Field,
encode_cf: T.Sequence[str],
errors: str,
log: logging.Logger = LOG,
) -> T.Tuple[T.Tuple[str, ...], T.Tuple[int, ...], T.Dict[str, Variable]]:
geo_coord_vars = {} # type: T.Dict[str, Variable]
grid_type = first["gridType"]
Expand Down Expand Up @@ -558,7 +561,8 @@ def build_variable_components(
if "time" in coord_vars and "step" in coord_vars:
# add the 'valid_time' secondary coordinate
time_dims, time_data = cfmessage.build_valid_time(
coord_vars["time"].data, coord_vars["step"].data,
coord_vars["time"].data,
coord_vars["step"].data,
)
attrs = COORD_ATTRS["valid_time"]
coord_vars["valid_time"] = Variable(dimensions=time_dims, data=time_data, attributes=attrs)
Expand Down Expand Up @@ -721,8 +725,8 @@ def open_fieldset(
**kwargs: T.Any,
) -> Dataset:
    """Builds a ``cfgrib.Dataset`` from a mapping of mappings."""
if indexpath is not None:
log.info(f"indexpath value {indexpath} is ignored")
if indexpath is not None and indexpath is not messages.DEFAULT_INDEXPATH:
log.warning(f"indexpath value {indexpath} is ignored")

index_keys = compute_index_keys(time_dims, extra_coords, filter_by_keys)
index = messages.FieldsetIndex.from_fieldset(fieldset, index_keys, computed_keys)
Expand All @@ -732,7 +736,7 @@ def open_fieldset(

def open_fileindex(
stream: messages.FileStream,
indexpath: str = "{path}.{short_hash}.idx",
indexpath: str = messages.DEFAULT_INDEXPATH,
index_keys: T.Sequence[str] = INDEX_KEYS + ["time", "step"],
filter_by_keys: T.Dict[str, T.Any] = {},
computed_keys: messages.ComputedKeysType = cfmessage.COMPUTED_KEYS,
Expand All @@ -747,7 +751,7 @@ def open_fileindex(
def open_file(
path: T.Union[str, "os.PathLike[str]"],
grib_errors: str = "warn",
indexpath: str = "{path}.{short_hash}.idx",
indexpath: str = messages.DEFAULT_INDEXPATH,
filter_by_keys: T.Dict[str, T.Any] = {},
read_keys: T.Sequence[str] = (),
time_dims: T.Sequence[str] = ("time", "step"),
Expand Down
3 changes: 2 additions & 1 deletion cfgrib/messages.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,7 @@ def multi_enabled(file: T.IO[bytes]) -> T.Iterator[None]:
"": None,
}

DEFAULT_INDEXPATH = "{path}.{short_hash}.idx"

OffsetType = T.Union[int, T.Tuple[int, int]]

Expand Down Expand Up @@ -500,7 +501,7 @@ class FileIndex(FieldsetIndex):

@classmethod
def from_indexpath_or_filestream(
cls, filestream, index_keys, indexpath="{path}.{short_hash}.idx", computed_keys={}, log=LOG
cls, filestream, index_keys, indexpath=DEFAULT_INDEXPATH, computed_keys={}, log=LOG
):
# type: (FileStream, T.Sequence[str], str, ComputedKeysType, logging.Logger) -> FileIndex

Expand Down
24 changes: 18 additions & 6 deletions cfgrib/xarray_plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import numpy as np
import xarray as xr

from . import abc, dataset
from . import abc, dataset, messages

if LooseVersion(xr.__version__) <= "0.17.0":
raise ImportError("xarray_plugin module needs xarray version >= 0.18+")
Expand Down Expand Up @@ -38,7 +38,10 @@ def __init__(
opener = dataset.open_fieldset
self.ds = opener(filename, **backend_kwargs)

def open_store_variable(self, var: dataset.Variable,) -> xr.Variable:
def open_store_variable(
self,
var: dataset.Variable,
) -> xr.Variable:
if isinstance(var.data, np.ndarray):
data = var.data
else:
Expand Down Expand Up @@ -67,7 +70,10 @@ def get_encoding(self) -> T.Dict[str, T.Set[str]]:


class CfGribBackend(BackendEntrypoint):
def guess_can_open(self, store_spec: str,) -> bool:
def guess_can_open(
self,
store_spec: str,
) -> bool:
try:
_, ext = os.path.splitext(store_spec)
except TypeError:
Expand All @@ -86,7 +92,7 @@ def open_dataset(
use_cftime: T.Union[bool, None] = None,
decode_timedelta: T.Union[bool, None] = None,
lock: T.Union[T.ContextManager[T.Any], None] = None,
indexpath: str = "{path}.{short_hash}.idx",
indexpath: str = messages.DEFAULT_INDEXPATH,
filter_by_keys: T.Dict[str, T.Any] = {},
read_keys: T.Iterable[str] = (),
encode_cf: T.Sequence[str] = ("parameter", "time", "geography", "vertical"),
Expand Down Expand Up @@ -139,11 +145,17 @@ def __init__(
self.dtype = array.dtype
self.array = array

def __getitem__(self, key: xr.core.indexing.ExplicitIndexer,) -> np.ndarray:
def __getitem__(
self,
key: xr.core.indexing.ExplicitIndexer,
) -> np.ndarray:
return xr.core.indexing.explicit_indexing_adapter(
key, self.shape, xr.core.indexing.IndexingSupport.BASIC, self._getitem
)

def _getitem(self, key: T.Tuple[T.Any, ...],) -> np.ndarray:
def _getitem(
self,
key: T.Tuple[T.Any, ...],
) -> np.ndarray:
with self.datastore.lock:
return self.array[key]
6 changes: 3 additions & 3 deletions tests/test_30_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ def test_build_data_var_components_encode_cf_geography() -> None:
def test_build_dataset_components_time_dims() -> None:
index_keys = sorted(dataset.INDEX_KEYS + ["time", "step"])
stream = messages.FileStream(TEST_DATA_UKMO, "warn")
index = dataset.open_fileindex(stream, "{path}.{short_hash}.idx", index_keys)
index = dataset.open_fileindex(stream, messages.DEFAULT_INDEXPATH, index_keys)
dims = dataset.build_dataset_components(index, read_keys=[])[0]
assert dims == {
"latitude": 6,
Expand All @@ -111,7 +111,7 @@ def test_build_dataset_components_time_dims() -> None:
time_dims = ["indexing_time", "verifying_time"]
index_keys = sorted(dataset.INDEX_KEYS + time_dims)
stream = messages.FileStream(TEST_DATA_UKMO, "warn")
index = dataset.open_fileindex(stream, "{path}.{short_hash}.idx", index_keys)
index = dataset.open_fileindex(stream, messages.DEFAULT_INDEXPATH, index_keys)
dims, *_ = dataset.build_dataset_components(index, read_keys=[], time_dims=time_dims)
assert dims == {
"number": 28,
Expand All @@ -124,7 +124,7 @@ def test_build_dataset_components_time_dims() -> None:
time_dims = ["indexing_time", "step"]
index_keys = sorted(dataset.INDEX_KEYS + time_dims)
stream = messages.FileStream(TEST_DATA_UKMO, "warn")
index = dataset.open_fileindex(stream, "{path}.{short_hash}.idx", index_keys)
index = dataset.open_fileindex(stream, messages.DEFAULT_INDEXPATH, index_keys)
dims, *_ = dataset.build_dataset_components(index, read_keys=[], time_dims=time_dims)
assert dims == {"number": 28, "indexing_time": 2, "step": 20, "latitude": 6, "longitude": 11}

Expand Down
10 changes: 5 additions & 5 deletions tests/test_40_xarray_store.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
"\n",
"import xarray as xr\n",
"\n",
"TEST_GRIB = 'sample-data/era5-levels-members.grib'"
"TEST_GRIB = \"sample-data/era5-levels-members.grib\""
]
},
{
Expand All @@ -19,7 +19,7 @@
"metadata": {},
"outputs": [],
"source": [
"ds = xr.open_dataset(TEST_GRIB, engine='cfgrib', cache=False)\n",
"ds = xr.open_dataset(TEST_GRIB, engine=\"cfgrib\", cache=False)\n",
"ds"
]
},
Expand All @@ -29,7 +29,7 @@
"metadata": {},
"outputs": [],
"source": [
"da = ds.data_vars['t']\n",
"da = ds.data_vars[\"t\"]\n",
"da"
]
},
Expand Down Expand Up @@ -93,7 +93,7 @@
"metadata": {},
"outputs": [],
"source": [
"dasel = da.sel(isobaricInhPa=500, time='2017-01-02T12:00:00')\n",
"dasel = da.sel(isobaricInhPa=500, time=\"2017-01-02T12:00:00\")\n",
"dasel"
]
},
Expand All @@ -103,7 +103,7 @@
"metadata": {},
"outputs": [],
"source": [
"dasel.plot(col='number', col_wrap=3, figsize=(15, 12))"
"dasel.plot(col=\"number\", col_wrap=3, figsize=(15, 12))"
]
},
{
Expand Down
6 changes: 5 additions & 1 deletion tests/test_50_sample_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,11 @@ def test_canonical_dataset_to_grib(grib_name: str, tmpdir: py.path.local) -> Non


@pytest.mark.parametrize(
"grib_name,ndims", [("era5-levels-members", 1), ("era5-single-level-scalar-time", 0),],
"grib_name,ndims",
[
("era5-levels-members", 1),
("era5-single-level-scalar-time", 0),
],
)
def test_open_dataset_extra_coords(grib_name: str, ndims: T.Any) -> None:
grib_path = os.path.join(SAMPLE_DATA_FOLDER, grib_name + ".grib")
Expand Down

0 comments on commit d282a4b

Please sign in to comment.