Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add a make_cache_key parameter #159

Merged
merged 3 commits into from
May 31, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 23 additions & 16 deletions flask_caching/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -293,6 +293,7 @@ def cached(
query_string=False,
hash_method=hashlib.md5,
cache_none=False,
make_cache_key=None,
):
"""Decorator. Use this to cache a function. By default the cache key
is `view/request.path`. You are able to use this decorator with any
Expand Down Expand Up @@ -333,6 +334,8 @@ def get_list():
**make_cache_key**
A function used in generating the cache_key used.

The ``make_cache_key`` attribute of the decorated function is readable and writable.

:param timeout: Default None. If set to an integer, will cache for that
amount of time. Unit of time is in seconds.

Expand Down Expand Up @@ -379,6 +382,8 @@ def get_list():
check when cache.get returns None. This will likely
lead to wrongly returned None values in concurrent
situations and is not recommended to use.
:param make_cache_key: Default None. If set to a callable object,
it is called with the decorated function's arguments to
generate the cache key.

"""

Expand All @@ -390,12 +395,11 @@ def decorated_function(*args, **kwargs):
return f(*args, **kwargs)

try:
if query_string:
cache_key = _make_cache_key_query_string()
if make_cache_key is not None and callable(make_cache_key):
cache_key = make_cache_key(*args, **kwargs)
else:
cache_key = _make_cache_key(
args, kwargs, use_request=True
)
cache_key = _make_cache_key(args, kwargs, use_request=True)


if (
callable(forced_update)
Expand Down Expand Up @@ -450,7 +454,7 @@ def decorated_function(*args, **kwargs):
)
return rv

def make_cache_key(*args, **kwargs):
def default_make_cache_key(*args, **kwargs):
# Convert non-keyword arguments (which is the way
# `make_cache_key` expects them) to keyword arguments
# (the way `url_for` expects them)
Expand Down Expand Up @@ -487,21 +491,24 @@ def _make_cache_key_query_string():
return cache_key

def _make_cache_key(args, kwargs, use_request):
if callable(key_prefix):
cache_key = key_prefix()
elif "%s" in key_prefix:
if use_request:
cache_key = key_prefix % request.path
else:
cache_key = key_prefix % url_for(f.__name__, **kwargs)
if query_string:
return _make_cache_key_query_string()
else:
cache_key = key_prefix
if callable(key_prefix):
cache_key = key_prefix()
elif "%s" in key_prefix:
if use_request:
cache_key = key_prefix % request.path
else:
cache_key = key_prefix % url_for(f.__name__, **kwargs)
else:
cache_key = key_prefix

return cache_key
return cache_key

decorated_function.uncached = f
decorated_function.cache_timeout = timeout
decorated_function.make_cache_key = make_cache_key
decorated_function.make_cache_key = default_make_cache_key

return decorated_function

Expand Down
56 changes: 56 additions & 0 deletions tests/test_view.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
import time
import hashlib

from flask import request

Expand Down Expand Up @@ -332,3 +333,58 @@ def view_works():
# ... making sure that different query parameter values
# don't yield the same cache!
assert not third_time == second_time


def test_generate_cache_key_from_request_body(app, cache):
    """Test a user supplied cache key maker.

    Create three POST requests to verify that the same request body
    always references the same cache entry, and that a different
    request body does not reuse that cache entry.

    Caching functionality is verified by a `@cached` route `/works` which
    produces a time in its response. The time in the response can verify that
    two requests with the same request body produce responses with the same time.
    """

    def _make_cache_key_request_body(argument):
        """Create keys based on request body."""
        # Hash the raw request body so it can be used as part of the
        # cache key: identical bodies map to identical keys.
        request_body = request.get_data(as_text=False)
        hashed_body = str(hashlib.md5(request_body).hexdigest())
        cache_key = request.path + hashed_body
        return cache_key

    @app.route('/works/<argument>', methods=['POST'])
    @cache.cached(make_cache_key=_make_cache_key_request_body)
    def view_works(argument):
        return str(time.time()) + request.get_data().decode()

    tc = app.test_client()

    # Make our request... this one populates the cache.
    first_response = tc.post(
        '/works/arg', data=dict(mock=True, value=1, test=2)
    )
    first_time = first_response.get_data(as_text=True)

    # Make the request again with an identical body...
    second_response = tc.post(
        '/works/arg', data=dict(mock=True, value=1, test=2)
    )
    second_time = second_response.get_data(as_text=True)

    # ... it must be served from the cache, so the embedded
    # timestamps for the first and second requests are the same!
    assert second_time == first_time

    # Last/third request with different body should
    # produce a different time.
    third_response = tc.post(
        '/works/arg', data=dict(mock=True, value=2, test=3)
    )
    third_time = third_response.get_data(as_text=True)

    # ... making sure that different request bodies
    # don't yield the same cache!
    assert third_time != second_time