Disable default cache when both cache and cache_seed are not set #1641

Merged · 4 commits · Apr 16, 2025
autogen/oai/client.py (2 changes: 1 addition & 1 deletion)

@@ -1052,7 +1052,7 @@ def create(self, **config: Any) -> ModelClient.ModelClientResponseProtocol:
         # construct the create params
         params = self._construct_create_params(create_config, extra_kwargs)
         # get the cache_seed, filter_func and context
-        cache_seed = extra_kwargs.get("cache_seed", LEGACY_DEFAULT_CACHE_SEED)
+        cache_seed = extra_kwargs.get("cache_seed")
         cache = extra_kwargs.get("cache")
         filter_func = extra_kwargs.get("filter_func")
         context = extra_kwargs.get("context")
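The practical effect of the one-line change above: when the caller sets neither `cache` nor `cache_seed`, `cache_seed` now resolves to `None` instead of the legacy default, so no disk cache is consulted. A minimal sketch of the caller-facing behavior, assuming the `OpenAIWrapper` import path used by this repo's tests; `config_list` is a placeholder for your own model configuration:

```python
from autogen import OpenAIWrapper  # assumption: same import the tests use

client = OpenAIWrapper(config_list=config_list)  # config_list: placeholder

# Default after this PR: no cache and no cache_seed, so every call
# goes to the model and nothing is written under the legacy cache dir.
fresh = client.create(messages=[{"role": "user", "content": "Hello"}])

# Opting back in: pass an integer seed and responses are cached on disk,
# keyed by that seed, exactly as before this PR.
cached_client = OpenAIWrapper(config_list=config_list, cache_seed=42)
repeatable = cached_client.create(messages=[{"role": "user", "content": "Hello"}])
```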
test/oai/test_client.py (46 changes: 45 additions & 1 deletion)

@@ -250,7 +250,7 @@ def test_legacy_cache(credentials_gpt_4o_mini: Credentials):
         shutil.rmtree(LEGACY_CACHE_DIR)

     # Test default cache seed.
-    client = OpenAIWrapper(config_list=credentials_gpt_4o_mini.config_list)
+    client = OpenAIWrapper(config_list=credentials_gpt_4o_mini.config_list, cache_seed=LEGACY_DEFAULT_CACHE_SEED)
     start_time = time.time()
     cold_cache_response = client.create(messages=[{"role": "user", "content": prompt}])
     end_time = time.time()
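For reference, the legacy disk cache is laid out as one subdirectory per seed, which is what the `os.path.exists` asserts in these tests check. A small sketch of that layout, assuming both constants are importable from `autogen/oai/client.py` (this diff shows them in use but not their definitions):

```python
import os

# Assumption: constants defined in autogen/oai/client.py alongside create().
from autogen.oai.client import LEGACY_CACHE_DIR, LEGACY_DEFAULT_CACHE_SEED

# Path asserted on when a test opts in with an explicit seed of 21:
explicit_seed_path = os.path.join(LEGACY_CACHE_DIR, str(21))

# Path that must NOT exist after this PR when no seed is given:
default_seed_path = os.path.join(LEGACY_CACHE_DIR, str(LEGACY_DEFAULT_CACHE_SEED))
```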
@@ -303,6 +303,50 @@ def test_legacy_cache(credentials_gpt_4o_mini: Credentials):
     assert os.path.exists(os.path.join(LEGACY_CACHE_DIR, str(21)))


+@run_for_optional_imports(["openai"], "openai")
+def test_no_default_cache(credentials_gpt_4o_mini: Credentials):
+    # Prompt to use for testing.
+    prompt = "Write a 100 word summary on the topic of the history of human civilization."
+
+    # Clear the cache.
+    if os.path.exists(LEGACY_CACHE_DIR):
+        shutil.rmtree(LEGACY_CACHE_DIR)
+
+    # Test the default cache, which is no cache.
+    client = OpenAIWrapper(config_list=credentials_gpt_4o_mini.config_list)
+    start_time = time.time()
+    no_cache_response = client.create(messages=[{"role": "user", "content": prompt}])
+    end_time = time.time()
+    duration_with_no_cache = end_time - start_time
+
+    # The legacy cache should not be used.
+    assert not os.path.exists(os.path.join(LEGACY_CACHE_DIR, str(LEGACY_DEFAULT_CACHE_SEED)))
+
+    # Create a cold cache.
+    client = OpenAIWrapper(config_list=credentials_gpt_4o_mini.config_list, cache_seed=LEGACY_DEFAULT_CACHE_SEED)
+    start_time = time.time()
+    cold_cache_response = client.create(messages=[{"role": "user", "content": prompt}])
+    end_time = time.time()
+    duration_with_cold_cache = end_time - start_time
+
+    # Create a warm cache.
+    start_time = time.time()
+    warm_cache_response = client.create(messages=[{"role": "user", "content": prompt}])
+    end_time = time.time()
+    duration_with_warm_cache = end_time - start_time
+
+    # Test that the warm cache returns the same response as the cold cache.
+    assert cold_cache_response == warm_cache_response
+    assert no_cache_response != warm_cache_response
+
+    # Test that the warm cache is faster than both the cold cache and no cache.
+    assert duration_with_warm_cache < duration_with_cold_cache
+    assert duration_with_warm_cache < duration_with_no_cache
+
+    # Test that the legacy cache is used.
+    assert os.path.exists(os.path.join(LEGACY_CACHE_DIR, str(LEGACY_DEFAULT_CACHE_SEED)))
+
+
-@run_for_optional_imports("openai", "openai")
+@run_for_optional_imports(["openai"], "openai")
 def test_cache(credentials_gpt_4o_mini: Credentials):
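This PR changes only the default; explicit caching still works through either parameter. A sketch of the `cache` route, assuming the `Cache.disk` context manager from `autogen.cache` (not part of this diff):

```python
from autogen import OpenAIWrapper
from autogen.cache import Cache  # assumption: Cache API from the broader codebase

client = OpenAIWrapper(config_list=config_list)  # config_list: placeholder

# An explicit Cache object enables caching even though the default is now off.
with Cache.disk(cache_seed=42) as cache:
    response = client.create(
        messages=[{"role": "user", "content": "Hello"}],
        cache=cache,
    )
```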