Commit

add type annotations to tok. __init__

PhilipMay committed May 10, 2021
1 parent e60f0c4 commit e853ad3
Showing 16 changed files with 16 additions and 16 deletions.
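
Every hunk below is the same one-line change: the bare "):" closing each tokenizer's __init__ signature becomes ") -> None:". A minimal sketch of the pattern, using an illustrative class (the real tokenizers subclass PreTrainedTokenizer and take many more arguments):

from typing import Any, Dict, Optional


class ExampleSentencePieceTokenizer:
    """Hypothetical stand-in for the tokenizers touched by this commit."""

    def __init__(
        self,
        mask_token: str = "[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:  # the return annotation this commit adds to each __init__
        # Avoid a mutable default argument: fall back to an empty dict.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs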
2 changes: 1 addition & 1 deletion src/transformers/models/albert/tokenization_albert.py
@@ -141,7 +141,7 @@ def __init__(
         mask_token="[MASK]",
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs
-    ):
+    ) -> None:
         # Mask token behave like a normal word, i.e. include the space before it
         mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

2 changes: 1 addition & 1 deletion src/transformers/models/barthez/tokenization_barthez.py
@@ -126,7 +126,7 @@ def __init__(
         mask_token="<mask>",
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs
-    ):
+    ) -> None:
         # Mask token behave like a normal word, i.e. include the space before it
         mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

@@ -90,7 +90,7 @@ def __init__(
         sep_token="<::::>",
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs
-    ):
+    ) -> None:
         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

         # Add extra_ids to the special token list

2 changes: 1 addition & 1 deletion src/transformers/models/big_bird/tokenization_big_bird.py
@@ -109,7 +109,7 @@ def __init__(
         cls_token="[CLS]",
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs
-    ):
+    ) -> None:
         bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
         eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
         unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

@@ -123,7 +123,7 @@ def __init__(
         additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs
-    ):
+    ) -> None:
         # Mask token behave like a normal word, i.e. include the space before it
         mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

@@ -108,7 +108,7 @@ def __init__(
         mask_token="[MASK]",
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs
-    ):
+    ) -> None:
         sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

         super().__init__(

2 changes: 1 addition & 1 deletion src/transformers/models/m2m_100/tokenization_m2m_100.py
@@ -134,7 +134,7 @@ def __init__(
         unk_token="<unk>",
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs,
-    ):
+    ) -> None:
         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

         super().__init__(

2 changes: 1 addition & 1 deletion src/transformers/models/marian/tokenization_marian.py
@@ -131,7 +131,7 @@ def __init__(
         model_max_length=512,
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs
-    ):
+    ) -> None:
         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

         super().__init__(

2 changes: 1 addition & 1 deletion src/transformers/models/mbart/tokenization_mbart50.py
@@ -124,7 +124,7 @@ def __init__(
         mask_token="<mask>",
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs
-    ):
+    ) -> None:
         # Mask token behave like a normal word, i.e. include the space before it
         mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

2 changes: 1 addition & 1 deletion src/transformers/models/pegasus/tokenization_pegasus.py
@@ -111,7 +111,7 @@ def __init__(
         offset=103,  # entries 2 - 104 are only used for pretraining
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs
-    ):
+    ) -> None:
         self.offset = offset

         if additional_special_tokens is not None:

2 changes: 1 addition & 1 deletion src/transformers/models/reformer/tokenization_reformer.py
@@ -97,7 +97,7 @@ def __init__(
         additional_special_tokens=[],
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs
-    ):
+    ) -> None:
         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

         super().__init__(

@@ -119,7 +119,7 @@ def __init__(
         lang_codes=None,
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs,
-    ):
+    ) -> None:
         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

         super().__init__(

2 changes: 1 addition & 1 deletion src/transformers/models/t5/tokenization_t5.py
@@ -116,7 +116,7 @@ def __init__(
         additional_special_tokens=None,
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs
-    ):
+    ) -> None:
         # Add extra_ids to the special token list
         if extra_ids > 0 and additional_special_tokens is None:
             additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]

@@ -135,7 +135,7 @@ def __init__(
         mask_token="[MASK]",
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs
-    ):
+    ) -> None:
         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

         super().__init__(

@@ -131,7 +131,7 @@ def __init__(
         mask_token="<mask>",
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs
-    ):
+    ) -> None:
         # Mask token behave like a normal word, i.e. include the space before it
         mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

2 changes: 1 addition & 1 deletion src/transformers/models/xlnet/tokenization_xlnet.py
@@ -140,7 +140,7 @@ def __init__(
         additional_special_tokens=["<eop>", "<eod>"],
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs
-    ):
+    ) -> None:
         # Mask token behave like a normal word, i.e. include the space before it
         mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
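
Why annotate __init__, which can only return None? By default mypy does not type-check the body of a function that carries no annotations at all, so the added "-> None" is what opts each constructor into checking. A hedged sketch of the effect (throwaway class names, not from the commit):

from typing import Any, Dict, Optional


class Unchecked:
    def __init__(self, sp_model_kwargs=None):
        # No annotations anywhere: mypy treats this function as untyped and,
        # without --check-untyped-defs, skips checking this body.
        self.sp_model_kwargs = sp_model_kwargs


class Checked:
    def __init__(self, sp_model_kwargs: Optional[Dict[str, Any]] = None) -> None:
        # Fully annotated: mypy now type-checks the body, so misuse of
        # sp_model_kwargs here would be reported.
        self.sp_model_kwargs: Dict[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs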
