diff --git a/tests/dragon/test_core_machine_learning_worker.py b/tests/dragon/test_core_machine_learning_worker.py
index ed9ac625cd..e9c356b4e0 100644
--- a/tests/dragon/test_core_machine_learning_worker.py
+++ b/tests/dragon/test_core_machine_learning_worker.py
@@ -34,7 +34,7 @@
 import torch
 
 import smartsim.error as sse
-from smartsim._core.mli.infrastructure.storage.feature_store import FeatureStoreKey
+from smartsim._core.mli.infrastructure.storage.feature_store import ModelKey, TensorKey
 from smartsim._core.mli.infrastructure.worker.worker import (
     InferenceRequest,
     MachineLearningWorkerCore,
@@ -98,7 +98,7 @@ def test_fetch_model_disk(persist_torch_model: pathlib.Path, test_dir: str) -> N
     fsd = feature_store.descriptor
     feature_store[str(persist_torch_model)] = persist_torch_model.read_bytes()
 
-    model_key = FeatureStoreKey(key=key, descriptor=fsd)
+    model_key = ModelKey(key=key, descriptor=fsd)
     request = InferenceRequest(model_key=model_key)
     batch = RequestBatch([request], None, model_key)
 
@@ -116,7 +116,7 @@ def test_fetch_model_disk_missing() -> None:
 
     key = "/path/that/doesnt/exist"
 
-    model_key = FeatureStoreKey(key=key, descriptor=fsd)
+    model_key = ModelKey(key=key, descriptor=fsd)
     request = InferenceRequest(model_key=model_key)
     batch = RequestBatch([request], None, model_key)
 
@@ -141,7 +141,7 @@ def test_fetch_model_feature_store(persist_torch_model: pathlib.Path) -> None:
     fsd = feature_store.descriptor
     feature_store[key] = persist_torch_model.read_bytes()
 
-    model_key = FeatureStoreKey(key=key, descriptor=feature_store.descriptor)
+    model_key = ModelKey(key=key, descriptor=feature_store.descriptor)
     request = InferenceRequest(model_key=model_key)
     batch = RequestBatch([request], None, model_key)
 
@@ -159,7 +159,7 @@ def test_fetch_model_feature_store_missing() -> None:
     feature_store = MemoryFeatureStore()
     fsd = feature_store.descriptor
 
-    model_key = FeatureStoreKey(key=key, descriptor=feature_store.descriptor)
+    model_key = ModelKey(key=key, descriptor=feature_store.descriptor)
     request = InferenceRequest(model_key=model_key)
     batch = RequestBatch([request], None, model_key)
 
@@ -182,7 +182,7 @@ def test_fetch_model_memory(persist_torch_model: pathlib.Path) -> None:
     fsd = feature_store.descriptor
     feature_store[key] = persist_torch_model.read_bytes()
 
-    model_key = FeatureStoreKey(key=key, descriptor=feature_store.descriptor)
+    model_key = ModelKey(key=key, descriptor=feature_store.descriptor)
     request = InferenceRequest(model_key=model_key)
     batch = RequestBatch([request], None, model_key)
 
@@ -199,11 +199,9 @@ def test_fetch_input_disk(persist_torch_tensor: pathlib.Path) -> None:
     feature_store = MemoryFeatureStore()
     fsd = feature_store.descriptor
 
-    request = InferenceRequest(
-        input_keys=[FeatureStoreKey(key=tensor_name, descriptor=fsd)]
-    )
+    request = InferenceRequest(input_keys=[TensorKey(key=tensor_name, descriptor=fsd)])
 
-    model_key = FeatureStoreKey(key="test-model", descriptor=fsd)
+    model_key = ModelKey(key="test-model", descriptor=fsd)
     batch = RequestBatch([request], None, model_key)
 
     worker = MachineLearningWorkerCore
@@ -223,9 +221,9 @@ def test_fetch_input_disk_missing() -> None:
     fsd = feature_store.descriptor
     key = "/path/that/doesnt/exist"
 
-    request = InferenceRequest(input_keys=[FeatureStoreKey(key=key, descriptor=fsd)])
+    request = InferenceRequest(input_keys=[TensorKey(key=key, descriptor=fsd)])
 
-    model_key = FeatureStoreKey(key="test-model", descriptor=fsd)
+    model_key = ModelKey(key="test-model", descriptor=fsd)
     batch = RequestBatch([request], None, model_key)
 
     with pytest.raises(sse.SmartSimError) as ex:
@@ -245,14 +243,12 @@ def test_fetch_input_feature_store(persist_torch_tensor: pathlib.Path) -> None:
     feature_store = MemoryFeatureStore()
     fsd = feature_store.descriptor
 
-    request = InferenceRequest(
-        input_keys=[FeatureStoreKey(key=tensor_name, descriptor=fsd)]
-    )
+    request = InferenceRequest(input_keys=[TensorKey(key=tensor_name, descriptor=fsd)])
 
     # put model bytes into the feature store
     feature_store[tensor_name] = persist_torch_tensor.read_bytes()
 
-    model_key = FeatureStoreKey(key="test-model", descriptor=fsd)
+    model_key = ModelKey(key="test-model", descriptor=fsd)
     batch = RequestBatch([request], None, model_key)
 
     fetch_result = worker.fetch_inputs(batch, {fsd: feature_store})
@@ -284,13 +280,13 @@ def test_fetch_multi_input_feature_store(persist_torch_tensor: pathlib.Path) ->
 
     request = InferenceRequest(
         input_keys=[
-            FeatureStoreKey(key=tensor_name + "1", descriptor=fsd),
-            FeatureStoreKey(key=tensor_name + "2", descriptor=fsd),
-            FeatureStoreKey(key=tensor_name + "3", descriptor=fsd),
+            TensorKey(key=tensor_name + "1", descriptor=fsd),
+            TensorKey(key=tensor_name + "2", descriptor=fsd),
+            TensorKey(key=tensor_name + "3", descriptor=fsd),
         ]
     )
 
-    model_key = FeatureStoreKey(key="test-model", descriptor=fsd)
+    model_key = ModelKey(key="test-model", descriptor=fsd)
     batch = RequestBatch([request], None, model_key)
 
     fetch_result = worker.fetch_inputs(batch, {fsd: feature_store})
@@ -310,9 +306,9 @@ def test_fetch_input_feature_store_missing() -> None:
     key = "bad-key"
     feature_store = MemoryFeatureStore()
     fsd = feature_store.descriptor
-    request = InferenceRequest(input_keys=[FeatureStoreKey(key=key, descriptor=fsd)])
+    request = InferenceRequest(input_keys=[TensorKey(key=key, descriptor=fsd)])
 
-    model_key = FeatureStoreKey(key="test-model", descriptor=fsd)
+    model_key = ModelKey(key="test-model", descriptor=fsd)
     batch = RequestBatch([request], None, model_key)
 
     with pytest.raises(sse.SmartSimError) as ex:
@@ -332,9 +328,9 @@ def test_fetch_input_memory(persist_torch_tensor: pathlib.Path) -> None:
     key = "test-model"
     feature_store[key] = persist_torch_tensor.read_bytes()
 
-    request = InferenceRequest(input_keys=[FeatureStoreKey(key=key, descriptor=fsd)])
+    request = InferenceRequest(input_keys=[TensorKey(key=key, descriptor=fsd)])
 
-    model_key = FeatureStoreKey(key="test-model", descriptor=fsd)
+    model_key = ModelKey(key="test-model", descriptor=fsd)
     batch = RequestBatch([request], None, model_key)
 
     fetch_result = worker.fetch_inputs(batch, {fsd: feature_store})
@@ -351,9 +347,9 @@ def test_place_outputs() -> None:
 
     # create a key to retrieve from the feature store
    keys = [
-        FeatureStoreKey(key=key_name + "1", descriptor=fsd),
-        FeatureStoreKey(key=key_name + "2", descriptor=fsd),
-        FeatureStoreKey(key=key_name + "3", descriptor=fsd),
+        TensorKey(key=key_name + "1", descriptor=fsd),
+        TensorKey(key=key_name + "2", descriptor=fsd),
+        TensorKey(key=key_name + "3", descriptor=fsd),
     ]
     data = [b"abcdef", b"ghijkl", b"mnopqr"]
 
@@ -376,6 +372,6 @@
         pytest.param("key", "", id="invalid descriptor"),
     ],
 )
-def test_invalid_featurestorekey(key, descriptor) -> None:
+def test_invalid_tensorkey(key, descriptor) -> None:
     with pytest.raises(ValueError):
-        fsk = FeatureStoreKey(key, descriptor)
+        fsk = TensorKey(key, descriptor)
diff --git a/tests/dragon/test_device_manager.py b/tests/dragon/test_device_manager.py
index c58879cb62..d270e921cb 100644
--- a/tests/dragon/test_device_manager.py
+++ b/tests/dragon/test_device_manager.py
@@ -36,7 +36,8 @@
 )
 from smartsim._core.mli.infrastructure.storage.feature_store import (
     FeatureStore,
-    FeatureStoreKey,
+    ModelKey,
+    TensorKey,
 )
 from smartsim._core.mli.infrastructure.worker.worker import (
     ExecuteResult,
@@ -116,9 +117,9 @@ def test_device_manager_model_in_request():
 
     worker = MockWorker()
 
-    tensor_key = FeatureStoreKey(key="key", descriptor="desc")
-    output_key = FeatureStoreKey(key="key", descriptor="desc")
-    model_key = FeatureStoreKey(key="model key", descriptor="desc")
+    tensor_key = TensorKey(key="key", descriptor="desc")
+    output_key = TensorKey(key="key", descriptor="desc")
+    model_key = ModelKey(key="model key", descriptor="desc")
 
     request = InferenceRequest(
         model_key=model_key,
@@ -154,9 +155,9 @@ def test_device_manager_model_key():
 
     worker = MockWorker()
 
-    tensor_key = FeatureStoreKey(key="key", descriptor="desc")
-    output_key = FeatureStoreKey(key="key", descriptor="desc")
-    model_key = FeatureStoreKey(key="model key", descriptor="desc")
+    tensor_key = TensorKey(key="key", descriptor="desc")
+    output_key = TensorKey(key="key", descriptor="desc")
+    model_key = ModelKey(key="model key", descriptor="desc")
 
     request = InferenceRequest(
         model_key=model_key,
diff --git a/tests/dragon/test_error_handling.py b/tests/dragon/test_error_handling.py
index 2039df190d..2e528ab639 100644
--- a/tests/dragon/test_error_handling.py
+++ b/tests/dragon/test_error_handling.py
@@ -55,7 +55,8 @@
 )
 from smartsim._core.mli.infrastructure.storage.feature_store import (
     FeatureStore,
-    FeatureStoreKey,
+    ModelKey,
+    TensorKey,
 )
 from smartsim._core.mli.infrastructure.worker.worker import (
     ExecuteResult,
@@ -145,7 +146,7 @@ def setup_worker_manager_model_bytes(
         batch_size=0,
     )
 
-    model_id = FeatureStoreKey(key="key", descriptor=app_feature_store.descriptor)
+    model_id = ModelKey(key="key", descriptor=app_feature_store.descriptor)
 
     request_batch = RequestBatch(
         [inf_request],
@@ -190,9 +191,9 @@ def setup_worker_manager_model_key(
         cooldown=3,
     )
 
-    tensor_key = FeatureStoreKey(key="key", descriptor=app_feature_store.descriptor)
-    output_key = FeatureStoreKey(key="key", descriptor=app_feature_store.descriptor)
-    model_id = FeatureStoreKey(key="model key", descriptor=app_feature_store.descriptor)
+    tensor_key = TensorKey(key="key", descriptor=app_feature_store.descriptor)
+    output_key = TensorKey(key="key", descriptor=app_feature_store.descriptor)
+    model_id = ModelKey(key="model key", descriptor=app_feature_store.descriptor)
 
     request = InferenceRequest(
         model_key=model_id,
diff --git a/tests/dragon/test_torch_worker.py b/tests/dragon/test_torch_worker.py
index 9a5ed6309f..2a9e7d01bd 100644
--- a/tests/dragon/test_torch_worker.py
+++ b/tests/dragon/test_torch_worker.py
@@ -37,7 +37,7 @@
 from torch import nn
 from torch.nn import functional as F
 
-from smartsim._core.mli.infrastructure.storage.feature_store import FeatureStoreKey
+from smartsim._core.mli.infrastructure.storage.feature_store import ModelKey
 from smartsim._core.mli.infrastructure.worker.torch_worker import TorchWorker
 from smartsim._core.mli.infrastructure.worker.worker import (
     ExecuteResult,
@@ -109,7 +109,7 @@ def get_request() -> InferenceRequest:
     ]
 
     return InferenceRequest(
-        model_key=FeatureStoreKey(key="model", descriptor="xyz"),
+        model_key=ModelKey(key="model", descriptor="xyz"),
         callback=None,
         raw_inputs=tensor_numpy,
         input_keys=None,