Commit 4b6ade0
revert featurestorekey test changes
ankona committed Sep 18, 2024
1 parent 9b437cb commit 4b6ade0
Showing 4 changed files with 41 additions and 43 deletions.
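Every hunk below makes the same substitution: the tests return to the typed key classes, with ModelKey identifying models and TensorKey identifying tensors, in place of the single FeatureStoreKey. A minimal sketch of the restored pattern, pieced together from the hunks that follow (the literal "desc" descriptor is borrowed from test_device_manager.py; combining model_key and input_keys in one request is an assumption here, though each keyword appears in the tests):

from smartsim._core.mli.infrastructure.storage.feature_store import ModelKey, TensorKey
from smartsim._core.mli.infrastructure.worker.worker import (
    InferenceRequest,
    RequestBatch,
)

fsd = "desc"  # placeholder descriptor; the tests use feature_store.descriptor

# Models and tensors now take distinct key types with the same (key, descriptor) shape.
model_key = ModelKey(key="test-model", descriptor=fsd)
tensor_key = TensorKey(key="test-tensor", descriptor=fsd)

# Requests and batches are built as in the tests below.
request = InferenceRequest(model_key=model_key, input_keys=[tensor_key])
batch = RequestBatch([request], None, model_key)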
54 changes: 25 additions & 29 deletions tests/dragon/test_core_machine_learning_worker.py
@@ -34,7 +34,7 @@
 import torch
 
 import smartsim.error as sse
-from smartsim._core.mli.infrastructure.storage.feature_store import FeatureStoreKey
+from smartsim._core.mli.infrastructure.storage.feature_store import ModelKey, TensorKey
 from smartsim._core.mli.infrastructure.worker.worker import (
     InferenceRequest,
     MachineLearningWorkerCore,
@@ -98,7 +98,7 @@ def test_fetch_model_disk(persist_torch_model: pathlib.Path, test_dir: str) -> None:
     fsd = feature_store.descriptor
     feature_store[str(persist_torch_model)] = persist_torch_model.read_bytes()
 
-    model_key = FeatureStoreKey(key=key, descriptor=fsd)
+    model_key = ModelKey(key=key, descriptor=fsd)
     request = InferenceRequest(model_key=model_key)
     batch = RequestBatch([request], None, model_key)
 
@@ -116,7 +116,7 @@ def test_fetch_model_disk_missing() -> None:
 
     key = "/path/that/doesnt/exist"
 
-    model_key = FeatureStoreKey(key=key, descriptor=fsd)
+    model_key = ModelKey(key=key, descriptor=fsd)
     request = InferenceRequest(model_key=model_key)
     batch = RequestBatch([request], None, model_key)
 
@@ -141,7 +141,7 @@ def test_fetch_model_feature_store(persist_torch_model: pathlib.Path) -> None:
     fsd = feature_store.descriptor
     feature_store[key] = persist_torch_model.read_bytes()
 
-    model_key = FeatureStoreKey(key=key, descriptor=feature_store.descriptor)
+    model_key = ModelKey(key=key, descriptor=feature_store.descriptor)
     request = InferenceRequest(model_key=model_key)
     batch = RequestBatch([request], None, model_key)
 
@@ -159,7 +159,7 @@ def test_fetch_model_feature_store_missing() -> None:
     feature_store = MemoryFeatureStore()
     fsd = feature_store.descriptor
 
-    model_key = FeatureStoreKey(key=key, descriptor=feature_store.descriptor)
+    model_key = ModelKey(key=key, descriptor=feature_store.descriptor)
     request = InferenceRequest(model_key=model_key)
     batch = RequestBatch([request], None, model_key)
 
@@ -182,7 +182,7 @@ def test_fetch_model_memory(persist_torch_model: pathlib.Path) -> None:
     fsd = feature_store.descriptor
     feature_store[key] = persist_torch_model.read_bytes()
 
-    model_key = FeatureStoreKey(key=key, descriptor=feature_store.descriptor)
+    model_key = ModelKey(key=key, descriptor=feature_store.descriptor)
     request = InferenceRequest(model_key=model_key)
     batch = RequestBatch([request], None, model_key)
 
@@ -199,11 +199,9 @@ def test_fetch_input_disk(persist_torch_tensor: pathlib.Path) -> None:
 
     feature_store = MemoryFeatureStore()
     fsd = feature_store.descriptor
-    request = InferenceRequest(
-        input_keys=[FeatureStoreKey(key=tensor_name, descriptor=fsd)]
-    )
+    request = InferenceRequest(input_keys=[TensorKey(key=tensor_name, descriptor=fsd)])
 
-    model_key = FeatureStoreKey(key="test-model", descriptor=fsd)
+    model_key = ModelKey(key="test-model", descriptor=fsd)
     batch = RequestBatch([request], None, model_key)
 
     worker = MachineLearningWorkerCore
@@ -223,9 +221,9 @@ def test_fetch_input_disk_missing() -> None:
     fsd = feature_store.descriptor
     key = "/path/that/doesnt/exist"
 
-    request = InferenceRequest(input_keys=[FeatureStoreKey(key=key, descriptor=fsd)])
+    request = InferenceRequest(input_keys=[TensorKey(key=key, descriptor=fsd)])
 
-    model_key = FeatureStoreKey(key="test-model", descriptor=fsd)
+    model_key = ModelKey(key="test-model", descriptor=fsd)
     batch = RequestBatch([request], None, model_key)
 
     with pytest.raises(sse.SmartSimError) as ex:
@@ -245,14 +243,12 @@ def test_fetch_input_feature_store(persist_torch_tensor: pathlib.Path) -> None:
     feature_store = MemoryFeatureStore()
     fsd = feature_store.descriptor
 
-    request = InferenceRequest(
-        input_keys=[FeatureStoreKey(key=tensor_name, descriptor=fsd)]
-    )
+    request = InferenceRequest(input_keys=[TensorKey(key=tensor_name, descriptor=fsd)])
 
     # put model bytes into the feature store
     feature_store[tensor_name] = persist_torch_tensor.read_bytes()
 
-    model_key = FeatureStoreKey(key="test-model", descriptor=fsd)
+    model_key = ModelKey(key="test-model", descriptor=fsd)
     batch = RequestBatch([request], None, model_key)
 
     fetch_result = worker.fetch_inputs(batch, {fsd: feature_store})
@@ -284,13 +280,13 @@ def test_fetch_multi_input_feature_store(persist_torch_tensor: pathlib.Path) -> None:
 
     request = InferenceRequest(
         input_keys=[
-            FeatureStoreKey(key=tensor_name + "1", descriptor=fsd),
-            FeatureStoreKey(key=tensor_name + "2", descriptor=fsd),
-            FeatureStoreKey(key=tensor_name + "3", descriptor=fsd),
+            TensorKey(key=tensor_name + "1", descriptor=fsd),
+            TensorKey(key=tensor_name + "2", descriptor=fsd),
+            TensorKey(key=tensor_name + "3", descriptor=fsd),
         ]
     )
 
-    model_key = FeatureStoreKey(key="test-model", descriptor=fsd)
+    model_key = ModelKey(key="test-model", descriptor=fsd)
     batch = RequestBatch([request], None, model_key)
 
     fetch_result = worker.fetch_inputs(batch, {fsd: feature_store})
@@ -310,9 +306,9 @@ def test_fetch_input_feature_store_missing() -> None:
     key = "bad-key"
     feature_store = MemoryFeatureStore()
     fsd = feature_store.descriptor
-    request = InferenceRequest(input_keys=[FeatureStoreKey(key=key, descriptor=fsd)])
+    request = InferenceRequest(input_keys=[TensorKey(key=key, descriptor=fsd)])
 
-    model_key = FeatureStoreKey(key="test-model", descriptor=fsd)
+    model_key = ModelKey(key="test-model", descriptor=fsd)
     batch = RequestBatch([request], None, model_key)
 
     with pytest.raises(sse.SmartSimError) as ex:
@@ -332,9 +328,9 @@ def test_fetch_input_memory(persist_torch_tensor: pathlib.Path) -> None:
 
     key = "test-model"
     feature_store[key] = persist_torch_tensor.read_bytes()
-    request = InferenceRequest(input_keys=[FeatureStoreKey(key=key, descriptor=fsd)])
+    request = InferenceRequest(input_keys=[TensorKey(key=key, descriptor=fsd)])
 
-    model_key = FeatureStoreKey(key="test-model", descriptor=fsd)
+    model_key = ModelKey(key="test-model", descriptor=fsd)
     batch = RequestBatch([request], None, model_key)
 
     fetch_result = worker.fetch_inputs(batch, {fsd: feature_store})
@@ -351,9 +347,9 @@ def test_place_outputs() -> None:
 
     # create a key to retrieve from the feature store
     keys = [
-        FeatureStoreKey(key=key_name + "1", descriptor=fsd),
-        FeatureStoreKey(key=key_name + "2", descriptor=fsd),
-        FeatureStoreKey(key=key_name + "3", descriptor=fsd),
+        TensorKey(key=key_name + "1", descriptor=fsd),
+        TensorKey(key=key_name + "2", descriptor=fsd),
+        TensorKey(key=key_name + "3", descriptor=fsd),
     ]
     data = [b"abcdef", b"ghijkl", b"mnopqr"]
 
@@ -376,6 +372,6 @@ def test_place_outputs() -> None:
         pytest.param("key", "", id="invalid descriptor"),
     ],
 )
-def test_invalid_featurestorekey(key, descriptor) -> None:
+def test_invalid_tensorkey(key, descriptor) -> None:
     with pytest.raises(ValueError):
-        fsk = FeatureStoreKey(key, descriptor)
+        fsk = TensorKey(key, descriptor)
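The renamed test_invalid_tensorkey above also pins down the constructor contract: the visible parametrized case expects ValueError for an empty descriptor (the hidden sibling case presumably covers an empty key). A short illustrative sketch under the same import-path assumption as the sketch at the top:

from smartsim._core.mli.infrastructure.storage.feature_store import TensorKey

# Mirrors the "invalid descriptor" case: an empty descriptor is rejected.
try:
    TensorKey("key", "")
except ValueError:
    print("rejected: descriptor must be non-empty")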
15 changes: 8 additions & 7 deletions tests/dragon/test_device_manager.py
@@ -36,7 +36,8 @@
 )
 from smartsim._core.mli.infrastructure.storage.feature_store import (
     FeatureStore,
-    FeatureStoreKey,
+    ModelKey,
+    TensorKey,
 )
 from smartsim._core.mli.infrastructure.worker.worker import (
     ExecuteResult,
@@ -116,9 +117,9 @@ def test_device_manager_model_in_request():
 
     worker = MockWorker()
 
-    tensor_key = FeatureStoreKey(key="key", descriptor="desc")
-    output_key = FeatureStoreKey(key="key", descriptor="desc")
-    model_key = FeatureStoreKey(key="model key", descriptor="desc")
+    tensor_key = TensorKey(key="key", descriptor="desc")
+    output_key = TensorKey(key="key", descriptor="desc")
+    model_key = ModelKey(key="model key", descriptor="desc")
 
     request = InferenceRequest(
         model_key=model_key,
@@ -154,9 +155,9 @@ def test_device_manager_model_key():
 
     worker = MockWorker()
 
-    tensor_key = FeatureStoreKey(key="key", descriptor="desc")
-    output_key = FeatureStoreKey(key="key", descriptor="desc")
-    model_key = FeatureStoreKey(key="model key", descriptor="desc")
+    tensor_key = TensorKey(key="key", descriptor="desc")
+    output_key = TensorKey(key="key", descriptor="desc")
+    model_key = ModelKey(key="model key", descriptor="desc")
 
     request = InferenceRequest(
         model_key=model_key,
11 changes: 6 additions & 5 deletions tests/dragon/test_error_handling.py
@@ -55,7 +55,8 @@
 )
 from smartsim._core.mli.infrastructure.storage.feature_store import (
     FeatureStore,
-    FeatureStoreKey,
+    ModelKey,
+    TensorKey,
 )
 from smartsim._core.mli.infrastructure.worker.worker import (
     ExecuteResult,
@@ -145,7 +146,7 @@ def setup_worker_manager_model_bytes(
         batch_size=0,
     )
 
-    model_id = FeatureStoreKey(key="key", descriptor=app_feature_store.descriptor)
+    model_id = ModelKey(key="key", descriptor=app_feature_store.descriptor)
 
     request_batch = RequestBatch(
         [inf_request],
@@ -190,9 +191,9 @@ def setup_worker_manager_model_key(
         cooldown=3,
     )
 
-    tensor_key = FeatureStoreKey(key="key", descriptor=app_feature_store.descriptor)
-    output_key = FeatureStoreKey(key="key", descriptor=app_feature_store.descriptor)
-    model_id = FeatureStoreKey(key="model key", descriptor=app_feature_store.descriptor)
+    tensor_key = TensorKey(key="key", descriptor=app_feature_store.descriptor)
+    output_key = TensorKey(key="key", descriptor=app_feature_store.descriptor)
+    model_id = ModelKey(key="model key", descriptor=app_feature_store.descriptor)
 
     request = InferenceRequest(
         model_key=model_id,
4 changes: 2 additions & 2 deletions tests/dragon/test_torch_worker.py
@@ -37,7 +37,7 @@
 from torch import nn
 from torch.nn import functional as F
 
-from smartsim._core.mli.infrastructure.storage.feature_store import FeatureStoreKey
+from smartsim._core.mli.infrastructure.storage.feature_store import ModelKey
 from smartsim._core.mli.infrastructure.worker.torch_worker import TorchWorker
 from smartsim._core.mli.infrastructure.worker.worker import (
     ExecuteResult,
@@ -109,7 +109,7 @@ def get_request() -> InferenceRequest:
     ]
 
     return InferenceRequest(
-        model_key=FeatureStoreKey(key="model", descriptor="xyz"),
+        model_key=ModelKey(key="model", descriptor="xyz"),
         callback=None,
         raw_inputs=tensor_numpy,
         input_keys=None,
