From 188be7909c85ed7286c7c310a9c3c521d7272559 Mon Sep 17 00:00:00 2001
From: Samaneh Saadat
Date: Thu, 23 May 2024 00:07:45 +0000
Subject: [PATCH] Fix a typo in CausalLM error handling.

---
 keras_nlp/src/models/causal_lm.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/keras_nlp/src/models/causal_lm.py b/keras_nlp/src/models/causal_lm.py
index 3493532e2c..c24150041e 100644
--- a/keras_nlp/src/models/causal_lm.py
+++ b/keras_nlp/src/models/causal_lm.py
@@ -352,9 +352,9 @@ def generate(
             raise ValueError(
                 'A `preprocessor` must be attached to the model if `stop_token_ids="auto"`. '
                 "Currently `preprocessor=None`. To call `generate()` with preprocessing "
-                "detached, either pass `stop_tokens_ids=None` to always generate until "
+                "detached, either pass `stop_token_ids=None` to always generate until "
                 "`max_length` or pass a tuple of token ids that should terminate generation "
-                "as `stop_tokens_ids`."
+                "as `stop_token_ids`."
             )
         elif stop_token_ids == "auto":
             stop_token_ids = [self.preprocessor.tokenizer.end_token_id]