Remove deprecated code #638

Merged: 6 commits, merged on Nov 25, 2021
Changes from 2 commits
46 changes: 0 additions & 46 deletions tests/functional/test_self_supervised.py

This file was deleted.

57 changes: 0 additions & 57 deletions torchmetrics/functional/self_supervised.py

This file was deleted.
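
If this is the module that held the deprecated `embedding_similarity` helper (deprecated in v0.6 in favour of the pairwise submodule), a minimal migration sketch would look like the following; the replacement call and its `zero_diagonal` argument come from the v0.6 pairwise API as I recall it, so treat them as assumptions rather than something stated in this PR:

import torch
from torchmetrics.functional import pairwise_cosine_similarity

embeddings = torch.tensor([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
# full NxN cosine-similarity matrix, roughly what the removed helper produced
sim = pairwise_cosine_similarity(embeddings, zero_diagonal=False)
print(sim.shape)  # torch.Size([3, 3])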

5 changes: 0 additions & 5 deletions torchmetrics/functional/text/wer.py
@@ -65,7 +65,6 @@ def _wer_compute(errors: Tensor, total: Tensor) -> Tensor:
def wer(
predictions: Union[str, List[str]],
references: Union[str, List[str]],
concatenate_texts: Optional[bool] = None, # TODO: remove in v0.7
) -> Tensor:
"""Word error rate (WER_) is a common metric of the performance of an automatic speech recognition system. This
value indicates the percentage of words that were incorrectly predicted. The lower the value, the better the
@@ -74,8 +73,6 @@ def wer(
Args:
predictions: Transcription(s) to score as a string or list of strings
references: Reference(s) for each speech input as a string or list of strings
concatenate_texts: Whether to concatenate all input texts or compute WER iteratively
This argument is deprecated in v0.6 and it will be removed in v0.7.

Returns:
Word error rate score
@@ -86,7 +83,5 @@ def wer(
>>> wer(predictions=predictions, references=references)
tensor(0.5000)
"""
if concatenate_texts is not None:
warn("`concatenate_texts` has been deprecated in v0.6 and it will be removed in v0.7", DeprecationWarning)
errors, total = _wer_update(predictions, references)
return _wer_compute(errors, total)
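
To make the `tensor(0.5000)` in the doctest concrete: `_wer_update` sums word-level edit distances and reference word counts over all pairs, and `_wer_compute` divides the two sums. A hedged sketch with made-up inputs (the doctest's own `predictions`/`references` are defined above the visible hunk, so these strings are only illustrative):

from torchmetrics.functional import wer

# hypothetical inputs, not the ones elided from the doctest above
predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]

# pair 1: 1 substitution against 4 reference words
# pair 2: 2 substitutions + 1 insertion against 4 reference words
# wer = (1 + 3) / (4 + 4) = 0.5
print(wer(predictions=predictions, references=references))  # tensor(0.5000)
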
13 changes: 0 additions & 13 deletions torchmetrics/text/rouge.py
@@ -26,17 +26,11 @@ class ROUGEScore(Metric):
of the `rouge-score` package `Python ROUGE Implementation`

Args:
newline_sep:
New line separates the inputs.
This argument is no longer in use. It is deprecated in v0.6 and will be removed in v0.7.
use_stemmer:
Use Porter stemmer to strip word suffixes to improve matching.
rouge_keys:
A list of rouge types to calculate.
Keys that are allowed are ``rougeL``, ``rougeLsum``, and ``rouge1`` through ``rouge9``.
decimal_places:
The number of digits to round the computed values to.
This argument is no longer in use. It is deprecated in v0.6 and will be removed in v0.7.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False. default: True
dist_sync_on_step:
@@ -82,10 +76,8 @@ class ROUGEScore(Metric):

def __init__(
self,
newline_sep: Optional[bool] = None, # remove in v0.7
use_stemmer: bool = False,
rouge_keys: Union[str, Tuple[str, ...]] = ("rouge1", "rouge2", "rougeL", "rougeLsum"), # type: ignore
decimal_places: Optional[bool] = None, # remove in v0.7
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
@@ -97,11 +89,6 @@ def __init__(
process_group=process_group,
dist_sync_fn=dist_sync_fn,
)
if newline_sep is not None:
warnings.warn("Argument `newline_sep` is deprecated in v0.6 and will be removed in v0.7")
if decimal_places is not None:
warnings.warn("Argument `decimal_places` is deprecated in v0.6 and will be removed in v0.7")

if use_stemmer or "rougeLsum" in rouge_keys:
if not _NLTK_AVAILABLE:
raise ValueError("Stemmer and/or `rougeLsum` requires that nltk is installed. Use `pip install nltk`.")
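
After this change the constructor keeps only `use_stemmer`, `rouge_keys`, and the usual `Metric` arguments. A minimal usage sketch, assuming the v0.6 forward/update signature that takes prediction and reference strings (the exact result keys are an assumption); picking keys without `rougeLsum` and leaving the stemmer off sidesteps the nltk check shown above:

from torchmetrics.text.rouge import ROUGEScore

rouge = ROUGEScore(use_stemmer=False, rouge_keys=("rouge1", "rouge2"))
scores = rouge("the cat sat on the mat", "a cat was on the mat")
# assumed result shape: a dict of tensors keyed per rouge type,
# e.g. scores["rouge1_fmeasure"]
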
5 changes: 0 additions & 5 deletions torchmetrics/text/wer.py
@@ -42,8 +42,6 @@ class WER(Metric):
Compute WER score of transcribed segments against references.

Args:
concatenate_texts: Whether to concatenate all input texts or compute WER iteratively.
This argument is deprecated in v0.6 and it will be removed in v0.7.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False. default: True
dist_sync_on_step:
@@ -72,7 +70,6 @@ class WER(Metric):

def __init__(
self,
concatenate_texts: Optional[bool] = None, # TODO: remove in v0.7
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
@@ -84,8 +81,6 @@ def __init__(
process_group=process_group,
dist_sync_fn=dist_sync_fn,
)
if concatenate_texts is not None:
warn("`concatenate_texts` has been deprecated in v0.6 and it will be removed in v0.7", DeprecationWarning)
self.add_state("errors", tensor(0, dtype=torch.float), dist_reduce_fx="sum")
self.add_state("total", tensor(0, dtype=torch.float), dist_reduce_fx="sum")
