Skip to content

Commit

Permalink
Remove uses of warnings.catch_warnings (#2690) (#2692)
Browse files Browse the repository at this point in the history
  • Loading branch information
dairiki authored May 3, 2023
1 parent 2051469 commit 3a4c8d0
Show file tree
Hide file tree
Showing 5 changed files with 48 additions and 46 deletions.
1 change: 1 addition & 0 deletions CHANGES.rst
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ Unreleased

- ``Authorization.from_header`` and ``WWWAuthenticate.from_header`` detect tokens
that end with base64 padding (``=``). :issue:`2685`
- Remove usage of ``warnings.catch_warnings``. :issue:`2690`


Version 2.3.3
Expand Down
60 changes: 23 additions & 37 deletions src/werkzeug/urls.py
Original file line number Diff line number Diff line change
Expand Up @@ -371,15 +371,13 @@ def encode(self, charset: str = "utf-8", errors: str = "replace") -> BytesURL:
"""Encodes the URL to a tuple made out of bytes. The charset is
only being used for the path, query and fragment.
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "'werkzeug", DeprecationWarning)
return BytesURL(
self.scheme.encode("ascii"),
self.encode_netloc(),
self.path.encode(charset, errors),
self.query.encode(charset, errors),
self.fragment.encode(charset, errors),
)
return BytesURL(
self.scheme.encode("ascii"),
self.encode_netloc(),
self.path.encode(charset, errors),
self.query.encode(charset, errors),
self.fragment.encode(charset, errors),
)


class BytesURL(BaseURL):
Expand All @@ -406,15 +404,13 @@ def decode(self, charset: str = "utf-8", errors: str = "replace") -> URL:
"""Decodes the URL to a tuple made out of strings. The charset is
only being used for the path, query and fragment.
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "'werkzeug", DeprecationWarning)
return URL(
self.scheme.decode("ascii"), # type: ignore
self.decode_netloc(),
self.path.decode(charset, errors), # type: ignore
self.query.decode(charset, errors), # type: ignore
self.fragment.decode(charset, errors), # type: ignore
)
return URL(
self.scheme.decode("ascii"), # type: ignore
self.decode_netloc(),
self.path.decode(charset, errors), # type: ignore
self.query.decode(charset, errors), # type: ignore
self.fragment.decode(charset, errors), # type: ignore
)


_unquote_maps: dict[frozenset[int], dict[bytes, int]] = {frozenset(): _hextobyte}
Expand Down Expand Up @@ -546,9 +542,7 @@ def url_parse(

result_type = URL if is_text_based else BytesURL

with warnings.catch_warnings():
warnings.filterwarnings("ignore", "'werkzeug", DeprecationWarning)
return result_type(scheme, netloc, url, query, fragment)
return result_type(scheme, netloc, url, query, fragment)


def _make_fast_url_quote(
Expand Down Expand Up @@ -655,9 +649,7 @@ def url_quote_plus(
stacklevel=2,
)

with warnings.catch_warnings():
warnings.filterwarnings("ignore", "'werkzeug", DeprecationWarning)
return url_quote(string, charset, errors, safe + " ", "+").replace(" ", "+")
return url_quote(string, charset, errors, safe + " ", "+").replace(" ", "+")


def url_unparse(components: tuple[str, str, str, str, str]) -> str:
Expand Down Expand Up @@ -759,9 +751,7 @@ def url_unquote_plus(
else:
s = s.replace(b"+", b" ")

with warnings.catch_warnings():
warnings.filterwarnings("ignore", "'werkzeug", DeprecationWarning)
return url_unquote(s, charset, errors)
return url_unquote(s, charset, errors)


def url_fix(s: str, charset: str = "utf-8") -> str:
Expand Down Expand Up @@ -795,13 +785,11 @@ def url_fix(s: str, charset: str = "utf-8") -> str:
if s.startswith("file://") and s[7:8].isalpha() and s[8:10] in (":/", "|/"):
s = f"file:///{s[7:]}"

with warnings.catch_warnings():
warnings.filterwarnings("ignore", "'werkzeug", DeprecationWarning)
url = url_parse(s)
path = url_quote(url.path, charset, safe="/%+$!*'(),")
qs = url_quote_plus(url.query, charset, safe=":&%=+$!*'(),")
anchor = url_quote_plus(url.fragment, charset, safe=":&%=+$!*'(),")
return url_unparse((url.scheme, url.encode_netloc(), path, qs, anchor))
url = url_parse(s)
path = url_quote(url.path, charset, safe="/%+$!*'(),")
qs = url_quote_plus(url.query, charset, safe=":&%=+$!*'(),")
anchor = url_quote_plus(url.fragment, charset, safe=":&%=+$!*'(),")
return url_unparse((url.scheme, url.encode_netloc(), path, qs, anchor))


def _codec_error_url_quote(e: UnicodeError) -> tuple[str, int]:
Expand Down Expand Up @@ -1171,9 +1159,7 @@ def url_decode_stream(

cls = MultiDict

with warnings.catch_warnings():
warnings.filterwarnings("ignore", "'make_chunk_iter", DeprecationWarning)
return cls(decoder)
return cls(decoder)


def _url_decode_impl(
Expand Down
8 changes: 2 additions & 6 deletions src/werkzeug/wsgi.py
Original file line number Diff line number Diff line change
Expand Up @@ -534,9 +534,7 @@ def make_line_iter(
)
_iter = _make_chunk_iter(stream, limit, buffer_size)

with warnings.catch_warnings():
warnings.filterwarnings("ignore", "'_make_chunk_iter", DeprecationWarning)
first_item = next(_iter, "")
first_item = next(_iter, "")

if not first_item:
return
Expand Down Expand Up @@ -631,9 +629,7 @@ def make_chunk_iter(
)
_iter = _make_chunk_iter(stream, limit, buffer_size)

with warnings.catch_warnings():
warnings.filterwarnings("ignore", "'_make_chunk_iter", DeprecationWarning)
first_item = next(_iter, b"")
first_item = next(_iter, b"")

if not first_item:
return
Expand Down
15 changes: 14 additions & 1 deletion tests/test_urls.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,15 @@
import io
import warnings

import pytest

from werkzeug import urls
from werkzeug.datastructures import OrderedMultiDict

pytestmark = [pytest.mark.filterwarnings("ignore:'werkzeug:DeprecationWarning")]
pytestmark = [
pytest.mark.filterwarnings("ignore:'werkzeug:DeprecationWarning"),
pytest.mark.filterwarnings("ignore:'_?make_chunk_iter':DeprecationWarning"),
]


def test_parsing():
Expand Down Expand Up @@ -382,3 +386,12 @@ def test_iri_to_uri_dont_quote_valid_code_points():
# [] are not valid URL code points according to WhatWG URL Standard
# https://url.spec.whatwg.org/#url-code-points
assert urls.iri_to_uri("/path[bracket]?(paren)") == "/path%5Bbracket%5D?(paren)"


def test_url_parse_does_not_clear_warnings_registry(recwarn):
    """Calling ``url_parse`` must not reset global warnings state (#2690).

    Under the "default" filter a given warning is reported only once per
    code location.  If ``url_parse`` internally used
    ``warnings.catch_warnings`` (the behavior removed by this change), the
    warnings registry would be cleared on each call and the second loop
    iteration would re-emit the warning, making ``len(recwarn) == 2``.
    """
    warnings.simplefilter("default")
    # Suppress the DeprecationWarning that url_parse itself emits, so only
    # our explicit "test warning" is captured by recwarn.
    warnings.simplefilter("ignore", DeprecationWarning)
    for _ in range(2):
        urls.url_parse("http://example.org/")
        warnings.warn("test warning")
    assert len(recwarn) == 1
10 changes: 8 additions & 2 deletions tests/test_wsgi.py
Original file line number Diff line number Diff line change
Expand Up @@ -257,6 +257,7 @@ def test_get_current_url_invalid_utf8():


@pytest.mark.filterwarnings("ignore:'make_line_iter:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:'_make_chunk_iter:DeprecationWarning")
def test_multi_part_line_breaks():
data = b"abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK"
test_stream = io.BytesIO(data)
Expand All @@ -279,6 +280,7 @@ def test_multi_part_line_breaks():


@pytest.mark.filterwarnings("ignore:'make_line_iter:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:'_make_chunk_iter:DeprecationWarning")
def test_multi_part_line_breaks_bytes():
data = b"abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK"
test_stream = io.BytesIO(data)
Expand All @@ -301,6 +303,7 @@ def test_multi_part_line_breaks_bytes():


@pytest.mark.filterwarnings("ignore:'make_line_iter:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:'_make_chunk_iter:DeprecationWarning")
def test_multi_part_line_breaks_problematic():
data = b"abc\rdef\r\nghi"
for _ in range(1, 10):
Expand All @@ -310,13 +313,14 @@ def test_multi_part_line_breaks_problematic():


@pytest.mark.filterwarnings("ignore:'make_line_iter:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:'_make_chunk_iter:DeprecationWarning")
def test_iter_functions_support_iterators():
data = ["abcdef\r\nghi", "jkl\r\nmnopqrstuvwxyz\r", "\nABCDEFGHIJK"]
lines = list(wsgi.make_line_iter(data))
assert lines == ["abcdef\r\n", "ghijkl\r\n", "mnopqrstuvwxyz\r\n", "ABCDEFGHIJK"]


@pytest.mark.filterwarnings("ignore:'make_chunk_iter:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:'_?make_chunk_iter:DeprecationWarning")
def test_make_chunk_iter():
data = [b"abcdefXghi", b"jklXmnopqrstuvwxyzX", b"ABCDEFGHIJK"]
rv = list(wsgi.make_chunk_iter(data, b"X"))
Expand All @@ -328,7 +332,7 @@ def test_make_chunk_iter():
assert rv == [b"abcdef", b"ghijkl", b"mnopqrstuvwxyz", b"ABCDEFGHIJK"]


@pytest.mark.filterwarnings("ignore:'make_chunk_iter:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:'_?make_chunk_iter:DeprecationWarning")
def test_make_chunk_iter_bytes():
data = [b"abcdefXghi", b"jklXmnopqrstuvwxyzX", b"ABCDEFGHIJK"]
rv = list(wsgi.make_chunk_iter(data, "X"))
Expand Down Expand Up @@ -362,6 +366,7 @@ def test_make_chunk_iter_bytes():


@pytest.mark.filterwarnings("ignore:'make_line_iter:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:'_make_chunk_iter:DeprecationWarning")
def test_lines_longer_buffer_size():
data = b"1234567890\n1234567890\n"
for bufsize in range(1, 15):
Expand All @@ -372,6 +377,7 @@ def test_lines_longer_buffer_size():


@pytest.mark.filterwarnings("ignore:'make_line_iter:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:'_make_chunk_iter:DeprecationWarning")
def test_lines_longer_buffer_size_cap():
data = b"1234567890\n1234567890\n"
for bufsize in range(1, 15):
Expand Down

0 comments on commit 3a4c8d0

Please sign in to comment.