Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

☠️ Remove deprecated #2692

Merged
merged 3 commits into the base branch from the source branch
Jan 30, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 0 additions & 2 deletions tests/test_trainers_args.py
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,6 @@ def test_dpo(self):
max_length=256,
max_prompt_length=64,
max_completion_length=64,
is_encoder_decoder=True,
disable_dropout=False,
# generate_during_eval=True, # ignore this one, it requires wandb
precompute_ref_log_probs=True,
Expand Down Expand Up @@ -188,7 +187,6 @@ def test_dpo(self):
self.assertEqual(trainer.args.max_length, 256)
self.assertEqual(trainer.args.max_prompt_length, 64)
self.assertEqual(trainer.args.max_completion_length, 64)
self.assertEqual(trainer.args.is_encoder_decoder, True)
self.assertEqual(trainer.args.disable_dropout, False)
# self.assertEqual(trainer.args.generate_during_eval, True)
self.assertEqual(trainer.args.precompute_ref_log_probs, True)
Expand Down
16 changes: 0 additions & 16 deletions trl/trainer/dpo_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Callable, Optional, Union
Expand Down Expand Up @@ -385,18 +384,3 @@ class DPOConfig(TrainingArguments):
"Comet during evaluation."
},
)

# Deprecated parameters
is_encoder_decoder: Optional[bool] = field(
default=None,
metadata={"help": "Deprecated. This argument is not used anymore."},
)

def __post_init__(self):
if self.is_encoder_decoder is not None:
warnings.warn(
"The `is_encoder_decoder` parameter is deprecated will be removed in version 0.15. The trainer now "
"automatically determines if the model is an encoder-decoder, so you can safely remove it."
)

return super().__post_init__()
4 changes: 0 additions & 4 deletions trl/trainer/orpo_trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,6 @@
from transformers.trainer_callback import TrainerCallback
from transformers.trainer_utils import EvalLoopOutput
from transformers.utils import is_peft_available, is_torch_fx_proxy
from transformers.utils.deprecation import deprecate_kwarg

from ..data_utils import maybe_apply_chat_template, maybe_extract_prompt
from ..models import PreTrainedModelWrapper
Expand Down Expand Up @@ -119,9 +118,6 @@ class ORPOTrainer(Trainer):

_tag_names = ["trl", "orpo"]

@deprecate_kwarg(
"tokenizer", "0.15.0", "processing_class", warn_if_greater_or_equal_version=True, raise_if_both_names=True
)
def __init__(
self,
model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
Expand Down
9 changes: 0 additions & 9 deletions trl/trainer/ppo_trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,6 @@
from transformers.trainer import DEFAULT_CALLBACKS, DEFAULT_PROGRESS_CALLBACK
from transformers.trainer_callback import CallbackHandler, ExportableState, PrinterCallback
from transformers.utils import is_peft_available
from transformers.utils.deprecation import deprecate_kwarg

from ..core import masked_mean, masked_whiten
from ..models import create_reference_model
Expand Down Expand Up @@ -98,14 +97,6 @@ def forward(self, **kwargs):
class PPOTrainer(Trainer):
_tag_names = ["trl", "ppo"]

@deprecate_kwarg("config", "0.15.0", "args", warn_if_greater_or_equal_version=True, raise_if_both_names=True)
@deprecate_kwarg(
"tokenizer", "0.15.0", "processing_class", warn_if_greater_or_equal_version=True, raise_if_both_names=True
)
@deprecate_kwarg("policy", "0.15.0", "model", warn_if_greater_or_equal_version=True, raise_if_both_names=True)
@deprecate_kwarg(
"ref_policy", "0.15.0", "ref_model", warn_if_greater_or_equal_version=True, raise_if_both_names=True
)
def __init__(
self,
args: PPOConfig,
Expand Down
4 changes: 0 additions & 4 deletions trl/trainer/reward_trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,6 @@
from transformers.trainer_pt_utils import nested_detach
from transformers.trainer_utils import EvalPrediction
from transformers.utils import is_peft_available
from transformers.utils.deprecation import deprecate_kwarg

from ..data_utils import maybe_apply_chat_template
from .reward_config import RewardConfig
Expand Down Expand Up @@ -84,9 +83,6 @@ def _tokenize(batch: dict[str, list[Any]], tokenizer: "PreTrainedTokenizerBase")
class RewardTrainer(Trainer):
_tag_names = ["trl", "reward-trainer"]

@deprecate_kwarg(
"tokenizer", "0.15.0", "processing_class", warn_if_greater_or_equal_version=True, raise_if_both_names=True
)
def __init__(
self,
model: Optional[Union[PreTrainedModel, nn.Module]] = None,
Expand Down