From f38c4ad302dd34faef1137b13e9636f4408b0462 Mon Sep 17 00:00:00 2001 From: Stas Bekman Date: Sun, 20 Dec 2020 10:28:28 -0800 Subject: [PATCH 1/8] better logging and help (#9203) --- examples/seq2seq/finetune_trainer.py | 1 + examples/seq2seq/utils.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/examples/seq2seq/finetune_trainer.py b/examples/seq2seq/finetune_trainer.py index 5159b82ef3e9..30cc30353a5f 100755 --- a/examples/seq2seq/finetune_trainer.py +++ b/examples/seq2seq/finetune_trainer.py @@ -98,6 +98,7 @@ class DataTrainingArguments: metadata={ "help": "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." + " This argument is also used to override the ``max_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``" }, ) test_max_target_length: Optional[int] = field( diff --git a/examples/seq2seq/utils.py b/examples/seq2seq/utils.py index 8014463122fc..437cdf2e6323 100644 --- a/examples/seq2seq/utils.py +++ b/examples/seq2seq/utils.py @@ -434,7 +434,8 @@ def use_task_specific_params(model, task): if task_specific_params is not None: pars = task_specific_params.get(task, {}) - logger.info(f"using task specific params for {task}: {pars}") + logger.info(f"setting model.config to task specific params for {task}:\n {pars}") + logger.info("note: command line args may override some of these") model.config.update(pars) From a4b21cdd20328f71448123ce7c962a78a5d75612 Mon Sep 17 00:00:00 2001 From: Amog Kamsetty Date: Mon, 21 Dec 2020 01:39:30 -0800 Subject: [PATCH 2/8] [RAG] Add Ray implementation for distributed retrieval (#9197) * wip * wip * wip * wip * wip * wip * wip * wip * uncomment * uncomment * wip * updates * add docstring * updates * fix arg * fixes * add unit tests * update readme * update readme * update finetune script * update test * add test * add ray to test dependencies * separate ray and ray tune * formatting * shutdown ray at end of test * fix tests * formatting * formatting * even more formatting * address comments * formatting * add files * Update examples/research_projects/rag/test_distributed_retriever.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * address comments * addressing comments Co-authored-by: Ubuntu Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- .../legacy/pytorch-lightning/requirements.txt | 1 + examples/research_projects/rag/README.md | 38 ++++ .../rag/_test_finetune_rag.py | 16 +- ...er.py => distributed_pytorch_retriever.py} | 3 +- .../rag/distributed_ray_retriever.py | 154 +++++++++++++++ .../research_projects/rag/finetune_rag.py | 144 +++++++++++++- .../research_projects/rag/finetune_rag.sh | 6 +- .../research_projects/rag/finetune_rag_ray.sh | 44 +++++ .../rag/test_distributed_retriever.py | 179 +++++++++++++++--- src/transformers/__init__.py | 1 + src/transformers/integrations.py | 16 +- src/transformers/models/rag/retrieval_rag.py | 5 +- src/transformers/trainer.py | 6 +- src/transformers/trainer_utils.py | 4 +- 14 files changed, 561 insertions(+), 56 deletions(-) rename examples/research_projects/rag/{distributed_retriever.py => distributed_pytorch_retriever.py} (99%) create mode 100644 examples/research_projects/rag/distributed_ray_retriever.py create mode 100755 examples/research_projects/rag/finetune_rag_ray.sh diff --git a/examples/legacy/pytorch-lightning/requirements.txt 
b/examples/legacy/pytorch-lightning/requirements.txt index cb218847c67e..7a3030197745 100644 --- a/examples/legacy/pytorch-lightning/requirements.txt +++ b/examples/legacy/pytorch-lightning/requirements.txt @@ -19,3 +19,4 @@ pytest conllu sentencepiece != 0.1.92 protobuf +ray diff --git a/examples/research_projects/rag/README.md b/examples/research_projects/rag/README.md index 12da66fa7e35..c16104d3c062 100644 --- a/examples/research_projects/rag/README.md +++ b/examples/research_projects/rag/README.md @@ -50,6 +50,44 @@ python examples/rag/consolidate_rag_checkpoint.py \ ``` You will then be able to pass `path/to/checkpoint` as `model_name_or_path` to the `finetune_rag.py` script. +## Document Retrieval
+When running distributed fine-tuning, each training worker needs to retrieve contextual documents
+for its input by querying an index loaded into memory. RAG provides two implementations for document retrieval,
+one with the [`torch.distributed`](https://pytorch.org/docs/stable/distributed.html) communication package and the other
+with [`Ray`](https://docs.ray.io/en/master/).
+
+This option can be configured with the `--distributed_retriever` flag, which can be set to either `pytorch` or `ray`.
+By default this flag is set to `pytorch`.
+
+For the PyTorch implementation, only training worker 0 loads the index into CPU memory, and a gather/scatter pattern is used
+to collect the inputs from the other training workers and send back the corresponding document embeddings.
+
+For the Ray implementation, the index is loaded in *separate* process(es). The training workers randomly select which
+retriever worker to query. To use Ray for distributed retrieval, you have to set the `--distributed_retriever` arg to `ray`.
+To configure the number of retrieval workers (the number of processes that load the index), you can set the `num_retrieval_workers` flag.
+Also make sure to start the Ray cluster before running fine-tuning.
+
+```bash
+# Start a single-node Ray cluster.
+ray start --head
+
+python examples/rag/finetune_rag.py \
+    --data_dir $DATA_DIR \
+    --output_dir $OUTPUT_DIR \
+    --model_name_or_path $MODEL_NAME_OR_PATH \
+    --model_type rag_sequence \
+    --fp16 \
+    --gpus 8 \
+    --distributed_retriever ray \
+    --num_retrieval_workers 4
+
+# Stop the Ray cluster once fine-tuning has finished.
+ray stop
+```
+
+Using Ray can lead to retrieval speedups in multi-GPU settings since multiple processes load the index rather than
+just the rank 0 training worker. Using Ray also allows you to load the index on GPU, since the index is loaded in a separate
+process from the model, while with PyTorch distributed retrieval both are loaded in the same process, potentially leading to GPU OOM. # Evaluation Our evaluation script enables two modes of evaluation (controlled by the `eval_mode` argument): `e2e` - end2end evaluation, returns EM (exact match) and F1 scores calculated for the downstream task and `retrieval` - which returns precision@k of the documents retrieved for provided inputs.
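The Ray-based retrieval described in the README section above follows the standard Ray actor pattern: the index lives inside long-running actor processes, and each training worker sends its queries to one of those actors. Below is a minimal, self-contained sketch of that pattern; it is illustrative only, `RetrievalWorker` and its toy in-memory index are invented for the example, and the real implementation is the `RagRayDistributedRetriever` added later in this patch.

```python
# Minimal sketch of the Ray actor pattern used for distributed retrieval.
# Illustrative only: the class and its toy "index" are invented here.
import random

import ray

ray.init()


@ray.remote
class RetrievalWorker:
    def __init__(self):
        # In the real retriever this is where the FAISS index would be loaded,
        # inside the actor's own process rather than in the training workers.
        self.index = {"who wrote hamlet": "doc_42", "capital of peru": "doc_7"}

    def retrieve(self, query):
        return self.index.get(query, "no_match")


# One actor per retrieval worker (cf. --num_retrieval_workers).
workers = [RetrievalWorker.remote() for _ in range(2)]

# Each training worker picks a random retrieval actor for every lookup.
worker = random.choice(workers)
print(ray.get(worker.retrieve.remote("capital of peru")))  # doc_7

ray.shutdown()
```

Because the actors own the index, the training processes never hold a copy of it themselves, and lookups can be spread across as many retrieval processes as `--num_retrieval_workers` requests.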
diff --git a/examples/research_projects/rag/_test_finetune_rag.py b/examples/research_projects/rag/_test_finetune_rag.py index 164ecfd93211..1be5ecbb89db 100644 --- a/examples/research_projects/rag/_test_finetune_rag.py +++ b/examples/research_projects/rag/_test_finetune_rag.py @@ -9,6 +9,7 @@ from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, + require_ray, require_torch_gpu, require_torch_multi_gpu, ) @@ -29,7 +30,7 @@ def _create_dummy_data(self, data_dir): with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f: f.write(content) - def _run_finetune(self, gpus: int): + def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"): stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @@ -66,6 +67,7 @@ def _run_finetune(self, gpus: int): --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ + --distributed_retriever {distributed_retriever} \ """.split() if gpus > 0: @@ -94,3 +96,15 @@ def test_finetune_gpu(self): def test_finetune_multigpu(self): result = self._run_finetune(gpus=2) self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2) + + @require_torch_gpu + @require_ray + def test_finetune_gpu_ray_retrieval(self): + result = self._run_finetune(gpus=1, distributed_retriever="ray") + self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2) + + @require_torch_multi_gpu + @require_ray + def test_finetune_multigpu_ray_retrieval(self): + result = self._run_finetune(gpus=1, distributed_retriever="ray") + self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2) diff --git a/examples/research_projects/rag/distributed_retriever.py b/examples/research_projects/rag/distributed_pytorch_retriever.py similarity index 99% rename from examples/research_projects/rag/distributed_retriever.py rename to examples/research_projects/rag/distributed_pytorch_retriever.py index cedd2c33409f..0edbc969a5d0 100644 --- a/examples/research_projects/rag/distributed_retriever.py +++ b/examples/research_projects/rag/distributed_pytorch_retriever.py @@ -31,14 +31,13 @@ class RagPyTorchDistributedRetriever(RagRetriever): If specified, use this index instead of the one built using the configuration """ - _init_retrieval = False - def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None): super().__init__( config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, + init_retrieval=False, ) self.process_group = None diff --git a/examples/research_projects/rag/distributed_ray_retriever.py b/examples/research_projects/rag/distributed_ray_retriever.py new file mode 100644 index 000000000000..69fd719cbcc4 --- /dev/null +++ b/examples/research_projects/rag/distributed_ray_retriever.py @@ -0,0 +1,154 @@ +import logging +import random + +import ray +from transformers import RagConfig, RagRetriever, RagTokenizer +from transformers.file_utils import requires_datasets, requires_faiss +from transformers.models.rag.retrieval_rag import CustomHFIndex + + +logger = logging.getLogger(__name__) + + +class RayRetriever: + def __init__(self): + self.initialized = False + + def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index): + if not self.initialized: + self.retriever = RagRetriever( + config, + question_encoder_tokenizer=question_encoder_tokenizer, + generator_tokenizer=generator_tokenizer, + index=index, + init_retrieval=False, + ) + self.initialized = True + + def 
init_retrieval(self): + self.retriever.index.init_index() + + def retrieve(self, question_hidden_states, n_docs): + doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs) + return doc_ids, retrieved_doc_embeds + + +class RagRayDistributedRetriever(RagRetriever): + """ + A distributed retriever built on top of the ``Ray`` API, a library + for building distributed applications (https://docs.ray.io/en/master/). + package. During training, all training workers initialize their own + instance of a `RagRayDistributedRetriever`, and each instance of + this distributed retriever shares a common set of Retrieval Ray + Actors (https://docs.ray.io/en/master/walkthrough.html#remote + -classes-actors) that load the index on separate processes. Ray + handles the communication between the `RagRayDistributedRetriever` + instances and the remote Ray actors. If training is done in a + non-distributed setup, the index will simply be loaded in the same + process as the training worker and Ray will not be used. + + Args: + config (:class:`~transformers.RagConfig`): + The configuration of the RAG model this Retriever is used with. Contains parameters indicating which ``Index`` to build. + question_encoder_tokenizer (:class:`~transformers.PretrainedTokenizer`): + The tokenizer that was used to tokenize the question. + It is used to decode the question and then use the generator_tokenizer. + generator_tokenizer (:class:`~transformers.PretrainedTokenizer`): + The tokenizer used for the generator part of the RagModel. + retrieval_workers (:obj:`List[ray.ActorClass(RayRetriever)]`): A list of already initialized `RayRetriever` actors. + These actor classes run on remote processes and are responsible for performing the index lookup. + index (:class:`~transformers.retrieval_rag.Index`, optional, defaults to the one defined by the configuration): + If specified, use this index instead of the one built using the configuration + """ + + def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None): + if index is not None and index.is_initialized() and len(retrieval_workers) > 0: + raise ValueError( + "When using Ray for distributed fine-tuning, " + "you'll need to provide the paths instead, " + "as the dataset and the index are loaded " + "separately. More info in examples/rag/use_own_knowledge_dataset.py " + ) + super().__init__( + config, + question_encoder_tokenizer=question_encoder_tokenizer, + generator_tokenizer=generator_tokenizer, + index=index, + init_retrieval=False, + ) + self.retrieval_workers = retrieval_workers + if len(self.retrieval_workers) > 0: + ray.get( + [ + worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index) + for worker in self.retrieval_workers + ] + ) + + def init_retrieval(self): + """ + Retriever initialization function, needs to be called from the + training process. This function triggers retrieval initialization + for all retrieval actors if using distributed setting, or loads + index into current process if training is not distributed. + """ + logger.info("initializing retrieval") + + if len(self.retrieval_workers) > 0: + ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers]) + else: + # Non-distributed training. Load index into this same process. + self.index.init_index() + + def retrieve(self, question_hidden_states, n_docs): + """ + Retrieves documents for specified ``question_hidden_states``. 
If + running training with multiple workers, a random retrieval actor is + selected to perform the index lookup and return the result. + + Args: + question_hidden_states (:obj:`np.ndarray` of shape :obj:`(batch_size, vector_size)`): + A batch of query vectors to retrieve with. + n_docs (:obj:`int`): + The number of docs retrieved per query. + + Output: + retrieved_doc_embeds (:obj:`np.ndarray` of shape :obj:`(batch_size, n_docs, dim)` + The retrieval embeddings of the retrieved docs per query. + doc_ids (:obj:`np.ndarray` of shape :obj:`batch_size, n_docs`) + The ids of the documents in the index + doc_dicts (:obj:`List[dict]`): + The retrieved_doc_embeds examples per query. + """ + if len(self.retrieval_workers) > 0: + # Select a random retrieval actor. + random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)] + doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs)) + else: + doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs) + return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids) + + @classmethod + def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs): + return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs) + + @classmethod + def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs): + requires_datasets(cls) + requires_faiss(cls) + config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs) + rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config) + question_encoder_tokenizer = rag_tokenizer.question_encoder + generator_tokenizer = rag_tokenizer.generator + if indexed_dataset is not None: + config.index_name = "custom" + index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset) + else: + index = cls._build_index(config) + return cls( + config, + question_encoder_tokenizer=question_encoder_tokenizer, + generator_tokenizer=generator_tokenizer, + retrieval_workers=actor_handles, + index=index, + ) diff --git a/examples/research_projects/rag/finetune_rag.py b/examples/research_projects/rag/finetune_rag.py index b62da19688ce..ef4c1a37854a 100644 --- a/examples/research_projects/rag/finetune_rag.py +++ b/examples/research_projects/rag/finetune_rag.py @@ -29,6 +29,12 @@ T5ForConditionalGeneration, ) from transformers import logging as transformers_logging +from transformers.integrations import is_ray_available + + +if is_ray_available(): + import ray + from distributed_ray_retriever import RagRayDistributedRetriever, RayRetriever from callbacks_rag import ( # noqa: E402 # isort:skipq @@ -36,7 +42,8 @@ get_early_stopping_callback, Seq2SeqLoggingCallback, ) -from distributed_retriever import RagPyTorchDistributedRetriever # noqa: E402 # isort:skip + +from distributed_pytorch_retriever import RagPyTorchDistributedRetriever # noqa: E402 # isort:skip from utils_rag import ( # noqa: E402 # isort:skip calculate_exact_match, flatten_list, @@ -88,7 +95,12 @@ def init_ddp_connection(self, global_rank: int, world_size: int, is_slurm_managi os.environ["MASTER_PORT"] = str(self.distributed_port) super().init_ddp_connection(global_rank, world_size, is_slurm_managing_tasks) if module.is_rag_model: - module.model.rag.retriever.init_retrieval(self.distributed_port) + if module.distributed_retriever == "pytorch": + module.model.rag.retriever.init_retrieval(self.distributed_port) + 
elif module.distributed_retriever == "ray" and global_rank == 0: + # For the Ray retriever, only initialize it once when global + # rank is 0. + module.model.rag.retriever.init_retrieval() class GenerativeQAModule(BaseTransformer): @@ -127,7 +139,13 @@ def __init__(self, hparams, **kwargs): config.generator.prefix = hparams.prefix config.label_smoothing = hparams.label_smoothing hparams, config.generator = set_extra_model_params(extra_model_params, hparams, config.generator) - retriever = RagPyTorchDistributedRetriever.from_pretrained(hparams.model_name_or_path, config=config) + if hparams.distributed_retriever == "pytorch": + retriever = RagPyTorchDistributedRetriever.from_pretrained(hparams.model_name_or_path, config=config) + elif hparams.distributed_retriever == "ray": + # The Ray retriever needs the handles to the retriever actors. + retriever = RagRayDistributedRetriever.from_pretrained( + hparams.model_name_or_path, hparams.actor_handles, config=config + ) model = self.model_class.from_pretrained(hparams.model_name_or_path, config=config, retriever=retriever) prefix = config.question_encoder.prefix else: @@ -180,7 +198,12 @@ def __init__(self, hparams, **kwargs): # For single GPU training, init_ddp_connection is not called. # So we need to initialize the retrievers here. if hparams.gpus <= 1: - self.model.retriever.init_retrieval(self.distributed_port) + if hparams.distributed_retriever == "ray": + self.model.retriever.init_retrieval() + elif hparams.distributed_retriever == "pytorch": + self.model.retriever.init_retrieval(self.distributed_port) + + self.distributed_retriever = hparams.distributed_retriever def forward(self, input_ids, **kwargs): return self.model(input_ids, **kwargs) @@ -420,6 +443,7 @@ def add_model_specific_args(parser, root_dir): type=str, help="RAG model type: sequence or token, if none specified, the type is inferred from the model_name_or_path", ) + return parser @staticmethod @@ -442,12 +466,58 @@ def add_retriever_specific_args(parser): default=None, help="Path to the faiss index for custom index. More info about custom indexes in the RagRetriever documentation as well as in `examples/rag/use_own_knowledge_dataset.py`", ) + parser.add_argument( + "--distributed_retriever", + choices=["ray", "pytorch"], + type=str, + default="pytorch", + help="What implementation to use for distributed retriever? If " + "pytorch is selected, the index is loaded on training " + "worker 0, and torch.distributed is used to handle " + "communication between training worker 0, and the other " + "training workers. If ray is selected, the Ray library is " + "used to create load the index on separate processes, " + "and Ray handles the communication between the training " + "workers and the retrieval actors.", + ) parser.add_argument( "--use_dummy_dataset", type=bool, default=False, help="Whether to use the dummy version of the dataset index. More info about custom indexes in the RagRetriever documentation as well as in `examples/rag/use_own_knowledge_dataset.py`", ) + + parser.add_argument( + "--num_retrieval_workers", + type=int, + default=1, + help="The number of retrieval actors to use when Ray is selected" + "for the distributed retriever. Has no effect when " + "distributed_retriever is set to pytorch.", + ) + + @staticmethod + def add_ray_specific_args(parser): + parser.add_argument( + "--num_retrieval_workers", + type=int, + default=1, + help="The number of retrieval actors to use when Ray is selected" + "for the distributed retriever. 
Has no effect when " + "distributed_retriever is set to pytorch.", + ) + + # Ray cluster address. + parser.add_argument( + "--ray-address", + default="auto", + type=str, + help="The address of the Ray cluster to connect to. If not " + "specified, Ray will attempt to automatically detect the " + "cluster. Has no effect if pytorch is used as the distributed " + "retriever.", + ) + return parser @@ -461,6 +531,46 @@ def main(args=None, model=None) -> GenerativeQAModule: args = args or parser.parse_args() Path(args.output_dir).mkdir(exist_ok=True) + + named_actors = [] + if args.distributed_retriever == "ray" and args.gpus > 1: + if not is_ray_available(): + raise RuntimeError("Please install Ray to use the Ray " "distributed retriever.") + # Connect to an existing Ray cluster. + try: + ray.init(address=args.ray_address) + except (ConnectionError, ValueError): + logger.warning( + "Connection to Ray cluster failed. Make sure a Ray" + "cluster is running by either using Ray's cluster " + "launcher (`ray up`) or by manually starting Ray on " + "each node via `ray start --head` for the head node " + "and `ray start --address=':6379'` for " + "additional nodes. See " + "https://docs.ray.io/en/master/cluster/index.html " + "for more info." + ) + raise + + # Create Ray actors only for rank 0. + if ("LOCAL_RANK" not in os.environ or os.environ["LOCAL_RANK"] == 0) and ( + "NODE_RANK" not in os.environ or os.environ["NODE_RANK"] == 0 + ): + remote_cls = ray.remote(RayRetriever) + named_actors = [ + remote_cls.options(name="retrieval_worker_{}".format(i)).remote() + for i in range(args.num_retrieval_workers) + ] + else: + logger.info( + "Getting named actors for NODE_RANK {}, LOCAL_RANK {}".format( + os.environ["NODE_RANK"], os.environ["LOCAL_RANK"] + ) + ) + named_actors = [ray.get_actor("retrieval_worker_{}".format(i)) for i in range(args.num_retrieval_workers)] + args.actor_handles = named_actors + assert args.actor_handles == named_actors + if model is None: model: GenerativeQAModule = GenerativeQAModule(args) @@ -471,17 +581,17 @@ def main(args=None, model=None) -> GenerativeQAModule: or str(args.output_dir).startswith("/tmp") or str(args.output_dir).startswith("/var") ): - logger = True # don't pollute wandb logs unnecessarily + training_logger = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger project = os.environ.get("WANDB_PROJECT", dataset) - logger = WandbLogger(name=model.output_dir.name, project=project) + training_logger = WandbLogger(name=model.output_dir.name, project=project) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger - logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}") + training_logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}") es_callback = ( get_early_stopping_callback(model.val_metric, args.early_stopping_patience) @@ -495,8 +605,9 @@ def main(args=None, model=None) -> GenerativeQAModule: logging_callback=Seq2SeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric), early_stopping_callback=es_callback, - logger=logger, + logger=training_logger, accelerator=CustomAccel() if args.gpus > 1 else None, + profiler=pl.profiler.AdvancedProfiler() if args.profile else None, ) pickle_save(model.hparams, model.output_dir / "hparams.pkl") @@ -509,4 +620,19 @@ def main(args=None, model=None) -> GenerativeQAModule: if __name__ == "__main__": - main() + parser = 
argparse.ArgumentParser() + parser = pl.Trainer.add_argparse_args(parser) + parser = GenerativeQAModule.add_model_specific_args(parser, os.getcwd()) + parser = GenerativeQAModule.add_retriever_specific_args(parser) + parser = GenerativeQAModule.add_ray_specific_args(parser) + + # Pytorch Lightning Profiler + parser.add_argument( + "--profile", + action="store_true", + help="If True, use pytorch_lightning.profiler.AdvancedProfiler to profile the Trainer.", + ) + + args = parser.parse_args() + + main(args) diff --git a/examples/research_projects/rag/finetune_rag.sh b/examples/research_projects/rag/finetune_rag.sh index 577b6ebd0dbd..8fd1fea3e546 100755 --- a/examples/research_projects/rag/finetune_rag.sh +++ b/examples/research_projects/rag/finetune_rag.sh @@ -2,7 +2,7 @@ export PYTHONPATH="../":"${PYTHONPATH}" # A sample finetuning run, you need to specify data_dir, output_dir and model_name_or_path -# run ./examples/rag/finetune.sh --help to see all the possible options +# run ./examples/rag/finetune_rag.sh --help to see all the possible options python examples/rag/finetune_rag.py \ --data_dir $DATA_DIR \ @@ -11,10 +11,10 @@ python examples/rag/finetune_rag.py \ --model_type rag_sequence \ --fp16 \ --gpus 8 \ + --profile \ --do_train \ --do_predict \ --n_val -1 \ - --val_check_interval 0.25 \ --train_batch_size 8 \ --eval_batch_size 1 \ --max_source_length 128 \ @@ -31,4 +31,4 @@ python examples/rag/finetune_rag.py \ --learning_rate 3e-05 \ --num_train_epochs 100 \ --warmup_steps 500 \ - --gradient_accumulation_steps 1 \ No newline at end of file + --gradient_accumulation_steps 1 \ diff --git a/examples/research_projects/rag/finetune_rag_ray.sh b/examples/research_projects/rag/finetune_rag_ray.sh new file mode 100755 index 000000000000..7c8e7b97e77c --- /dev/null +++ b/examples/research_projects/rag/finetune_rag_ray.sh @@ -0,0 +1,44 @@ +# Sample script to finetune RAG using Ray for distributed retrieval. + +# Add parent directory to python path to access lightning_base.py +export PYTHONPATH="../":"${PYTHONPATH}" + +# Start a single-node Ray cluster. +ray start --head + +# A sample finetuning run, you need to specify data_dir, output_dir and model_name_or_path +# run ./examples/rag/finetune_rag_ray.sh --help to see all the possible options + +python examples/rag/finetune_rag.py \ + --data_dir $DATA_DIR \ + --output_dir $OUTPUT_DIR \ + --model_name_or_path $MODEL_NAME_OR_PATH \ + --model_type rag_sequence \ + --fp16 \ + --gpus 8 \ + --profile \ + --do_train \ + --do_predict \ + --n_val -1 \ + --train_batch_size 8 \ + --eval_batch_size 1 \ + --max_source_length 128 \ + --max_target_length 25 \ + --val_max_target_length 25 \ + --test_max_target_length 25 \ + --label_smoothing 0.1 \ + --dropout 0.1 \ + --attention_dropout 0.1 \ + --weight_decay 0.001 \ + --adam_epsilon 1e-08 \ + --max_grad_norm 0.1 \ + --lr_scheduler polynomial \ + --learning_rate 3e-05 \ + --num_train_epochs 100 \ + --warmup_steps 500 \ + --gradient_accumulation_steps 1 \ + --distributed_retriever ray \ + --num_retrieval_workers 4 + +# Stop the Ray cluster. 
+ray stop diff --git a/examples/research_projects/rag/test_distributed_retriever.py b/examples/research_projects/rag/test_distributed_retriever.py index e7a5d9ba91a3..8865a3098959 100644 --- a/examples/research_projects/rag/test_distributed_retriever.py +++ b/examples/research_projects/rag/test_distributed_retriever.py @@ -13,15 +13,27 @@ import faiss from transformers import BartConfig, BartTokenizer, DPRConfig, DPRQuestionEncoderTokenizer, RagConfig from transformers.file_utils import is_datasets_available, is_faiss_available, is_psutil_available, is_torch_available +from transformers.integrations import is_ray_available from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES -from transformers.models.rag.retrieval_rag import CustomHFIndex +from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES -from transformers.testing_utils import require_torch_non_multi_gpu_but_fix_me +from transformers.testing_utils import require_ray, require_torch_non_multi_gpu_but_fix_me sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # noqa: E402 # isort:skip -from distributed_retriever import RagPyTorchDistributedRetriever # noqa: E402 # isort:skip +if is_torch_available(): + from distributed_pytorch_retriever import RagPyTorchDistributedRetriever # noqa: E402 # isort:skip +else: + RagPyTorchDistributedRetriever = None + +if is_ray_available(): + import ray # noqa: E402 # isort:skip + from distributed_ray_retriever import RagRayDistributedRetriever, RayRetriever # noqa: E402 # isort:skip +else: + ray = None + RagRayDistributedRetriever = None + RayRetriever = None def require_distributed_retrieval(test_case): @@ -32,8 +44,8 @@ def require_distributed_retrieval(test_case): These tests are skipped when respective libraries are not installed. """ - if not (is_torch_available() and is_datasets_available() and is_faiss_available() and is_psutil_available()): - test_case = unittest.skip("test requires PyTorch, Datasets, Faiss, psutil")(test_case) + if not (is_datasets_available() and is_faiss_available() and is_psutil_available()): + test_case = unittest.skip("test requires Datasets, Faiss, psutil")(test_case) return test_case @@ -144,7 +156,31 @@ def get_dummy_pytorch_distributed_retriever( retriever.init_retrieval(port) return retriever - def get_dummy_custom_hf_index_retriever(self, init_retrieval: bool, from_disk: bool, port=12345): + def get_dummy_ray_distributed_retriever(self, init_retrieval: bool) -> RagRayDistributedRetriever: + # Have to run in local mode because sys.path modifications at top of + # file are not propogated to remote workers. 
+ # https://stackoverflow.com/questions/54338013/parallel-import-a-python-file-from-sibling-folder + ray.init(local_mode=True) + config = RagConfig( + retrieval_vector_size=self.retrieval_vector_size, + question_encoder=DPRConfig().to_dict(), + generator=BartConfig().to_dict(), + ) + remote_cls = ray.remote(RayRetriever) + workers = [remote_cls.remote() for _ in range(1)] + with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: + mock_load_dataset.return_value = self.get_dummy_dataset() + retriever = RagRayDistributedRetriever( + config, + question_encoder_tokenizer=self.get_dpr_tokenizer(), + generator_tokenizer=self.get_bart_tokenizer(), + retrieval_workers=workers, + ) + if init_retrieval: + retriever.init_retrieval() + return retriever + + def get_dummy_custom_hf_index_pytorch_retriever(self, init_retrieval: bool, from_disk: bool, port=12345): dataset = self.get_dummy_dataset() config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, @@ -175,13 +211,51 @@ def get_dummy_custom_hf_index_retriever(self, init_retrieval: bool, from_disk: b retriever.init_retrieval(port) return retriever - @require_torch_non_multi_gpu_but_fix_me - def test_pytorch_distributed_retriever_retrieve(self): - n_docs = 1 - retriever = self.get_dummy_pytorch_distributed_retriever(init_retrieval=True) - hidden_states = np.array( - [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 + def get_dummy_custom_hf_index_ray_retriever(self, init_retrieval: bool, from_disk: bool): + # Have to run in local mode because sys.path modifications at top of + # file are not propogated to remote workers. + # https://stackoverflow.com/questions/54338013/parallel-import-a-python-file-from-sibling-folder + ray.init(local_mode=True) + dataset = self.get_dummy_dataset() + config = RagConfig( + retrieval_vector_size=self.retrieval_vector_size, + question_encoder=DPRConfig().to_dict(), + generator=BartConfig().to_dict(), + index_name="custom", ) + remote_cls = ray.remote(RayRetriever) + workers = [remote_cls.remote() for _ in range(1)] + if from_disk: + config.passages_path = os.path.join(self.tmpdirname, "dataset") + config.index_path = os.path.join(self.tmpdirname, "index.faiss") + dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss")) + dataset.drop_index("embeddings") + dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset")) + del dataset + retriever = RagRayDistributedRetriever( + config, + question_encoder_tokenizer=self.get_dpr_tokenizer(), + generator_tokenizer=self.get_bart_tokenizer(), + retrieval_workers=workers, + index=CustomHFIndex.load_from_disk( + vector_size=config.retrieval_vector_size, + dataset_path=config.passages_path, + index_path=config.index_path, + ), + ) + else: + retriever = RagRayDistributedRetriever( + config, + question_encoder_tokenizer=self.get_dpr_tokenizer(), + generator_tokenizer=self.get_bart_tokenizer(), + retrieval_workers=workers, + index=CustomHFIndex(config.retrieval_vector_size, dataset), + ) + if init_retrieval: + retriever.init_retrieval() + return retriever + + def distributed_retriever_check(self, retriever: RagRetriever, hidden_states: np.array, n_docs: int) -> None: retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) @@ -192,33 +266,76 @@ def test_pytorch_distributed_retriever_retrieve(self): 
self.assertListEqual(doc_ids.tolist(), [[1], [0]]) @require_torch_non_multi_gpu_but_fix_me - def test_custom_hf_index_retriever_retrieve(self): + def test_pytorch_distributed_retriever_retrieve(self): + n_docs = 1 + hidden_states = np.array( + [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 + ) + + self.distributed_retriever_check( + self.get_dummy_pytorch_distributed_retriever(init_retrieval=True), hidden_states, n_docs + ) + + @require_torch_non_multi_gpu_but_fix_me + def test_custom_hf_index_pytorch_retriever_retrieve(self): n_docs = 1 - retriever = self.get_dummy_custom_hf_index_retriever(init_retrieval=True, from_disk=False) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) - retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) - self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) - self.assertEqual(len(doc_dicts), 2) - self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"]) - self.assertEqual(len(doc_dicts[0]["id"]), n_docs) - self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc - self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc - self.assertListEqual(doc_ids.tolist(), [[1], [0]]) + + self.distributed_retriever_check( + self.get_dummy_custom_hf_index_pytorch_retriever(init_retrieval=True, from_disk=False), + hidden_states, + n_docs, + ) @require_torch_non_multi_gpu_but_fix_me def test_custom_pytorch_distributed_retriever_retrieve_from_disk(self): n_docs = 1 - retriever = self.get_dummy_custom_hf_index_retriever(init_retrieval=True, from_disk=True) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) - retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) - self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) - self.assertEqual(len(doc_dicts), 2) - self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"]) - self.assertEqual(len(doc_dicts[0]["id"]), n_docs) - self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc - self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc - self.assertListEqual(doc_ids.tolist(), [[1], [0]]) + + self.distributed_retriever_check( + self.get_dummy_custom_hf_index_pytorch_retriever(init_retrieval=True, from_disk=True), + hidden_states, + n_docs, + ) + + @require_ray + def test_ray_distributed_retriever_retrieve(self): + n_docs = 1 + hidden_states = np.array( + [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 + ) + + self.distributed_retriever_check( + self.get_dummy_ray_distributed_retriever(init_retrieval=True), hidden_states, n_docs + ) + ray.shutdown() + + @require_ray + def test_custom_hf_index_ray_retriever_retrieve(self): + n_docs = 1 + hidden_states = np.array( + [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 + ) + with self.assertRaises(ValueError): + self.distributed_retriever_check( + self.get_dummy_custom_hf_index_ray_retriever(init_retrieval=True, from_disk=False), + hidden_states, + n_docs, + ) + ray.shutdown() + + @require_ray + def test_custom_ray_distributed_retriever_retrieve_from_disk(self): + n_docs = 1 + hidden_states = np.array( + 
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 + ) + + self.distributed_retriever_check( + self.get_dummy_custom_hf_index_ray_retriever(init_retrieval=True, from_disk=True), hidden_states, n_docs + ) + ray.shutdown() diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 0e39ed7ba5a9..4586fe5363f3 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -219,6 +219,7 @@ is_comet_available, is_optuna_available, is_ray_available, + is_ray_tune_available, is_tensorboard_available, is_wandb_available, ) diff --git a/src/transformers/integrations.py b/src/transformers/integrations.py index ecc2a9f635a6..2d673087e832 100644 --- a/src/transformers/integrations.py +++ b/src/transformers/integrations.py @@ -63,8 +63,16 @@ import ray # noqa: F401 _has_ray = True + try: + # Ray Tune has additional dependencies. + from ray import tune # noqa: F401 + + _has_ray_tune = True + except (ImportError): + _has_ray_tune = False except (ImportError): _has_ray = False + _has_ray_tune = False try: from torch.utils.tensorboard import SummaryWriter # noqa: F401 @@ -127,6 +135,10 @@ def is_ray_available(): return _has_ray +def is_ray_tune_available(): + return _has_ray_tune + + def is_azureml_available(): return _has_azureml @@ -143,7 +155,7 @@ def hp_params(trial): if is_optuna_available(): if isinstance(trial, optuna.Trial): return trial.params - if is_ray_available(): + if is_ray_tune_available(): if isinstance(trial, dict): return trial @@ -153,7 +165,7 @@ def hp_params(trial): def default_hp_search_backend(): if is_optuna_available(): return "optuna" - elif is_ray_available(): + elif is_ray_tune_available(): return "ray" diff --git a/src/transformers/models/rag/retrieval_rag.py b/src/transformers/models/rag/retrieval_rag.py index 8db18a1d65d9..ff85560e5933 100644 --- a/src/transformers/models/rag/retrieval_rag.py +++ b/src/transformers/models/rag/retrieval_rag.py @@ -370,9 +370,8 @@ class RagRetriever: """ - _init_retrieval = True - - def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None): + def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None, init_retrieval=True): + self._init_retrieval = init_retrieval requires_datasets(self) requires_faiss(self) super().__init__() diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 8cbc8ea299b5..10a911bd2743 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -37,7 +37,7 @@ is_fairscale_available, is_mlflow_available, is_optuna_available, - is_ray_available, + is_ray_tune_available, is_tensorboard_available, is_wandb_available, run_hp_search_optuna, @@ -145,7 +145,7 @@ if is_optuna_available(): import optuna -if is_ray_available(): +if is_ray_tune_available(): from ray import tune if is_azureml_available(): @@ -1062,7 +1062,7 @@ def hyperparameter_search( backend = HPSearchBackend(backend) if backend == HPSearchBackend.OPTUNA and not is_optuna_available(): raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.") - if backend == HPSearchBackend.RAY and not is_ray_available(): + if backend == HPSearchBackend.RAY and not is_ray_tune_available(): raise RuntimeError( "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`." 
) diff --git a/src/transformers/trainer_utils.py b/src/transformers/trainer_utils.py index a63959f48cce..4dc7874a0d59 100644 --- a/src/transformers/trainer_utils.py +++ b/src/transformers/trainer_utils.py @@ -132,9 +132,9 @@ def default_hp_space_optuna(trial) -> Dict[str, float]: def default_hp_space_ray(trial) -> Dict[str, float]: - from .integrations import is_ray_available + from .integrations import is_ray_tune_available - assert is_ray_available(), "This function needs ray installed: `pip install ray[tune]`" + assert is_ray_tune_available(), "This function needs ray installed: `pip " "install ray[tune]`" from ray import tune return { From 6b034309ca4ca2ec6e5c3cacda92a448fa10b921 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Mon, 21 Dec 2020 10:41:34 +0100 Subject: [PATCH 3/8] fix warning (#9231) --- src/transformers/models/t5/modeling_t5.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/transformers/models/t5/modeling_t5.py b/src/transformers/models/t5/modeling_t5.py index 491ece4d9948..0ce2be3c62ac 100644 --- a/src/transformers/models/t5/modeling_t5.py +++ b/src/transformers/models/t5/modeling_t5.py @@ -1127,6 +1127,8 @@ class T5Model(T5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r"encoder\.embed_tokens\.weight", r"decoder\.embed_tokens\.weight", + ] + _keys_to_ignore_on_load_unexpected = [ r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight", ] @@ -1300,6 +1302,8 @@ class T5ForConditionalGeneration(T5PreTrainedModel): r"encoder\.embed_tokens\.weight", r"decoder\.embed_tokens\.weight", r"lm_head\.weight", + ] + _keys_to_ignore_on_load_unexpected = [ r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight", ] From 5a8a4eb18746d158faa3331748a28c4ccd88d063 Mon Sep 17 00:00:00 2001 From: Julien Plu Date: Mon, 21 Dec 2020 13:10:15 +0100 Subject: [PATCH 4/8] Improve BERT-like models performance with better self attention (#9124) * Improve BERT-like models attention layers * Apply style * Put back error raising instead of assert * Update template * Fix copies * Apply raising valueerror in MPNet * Restore the copy check for the Intermediate layer in Longformer * Update longformer --- setup.py | 4 +- src/transformers/dependency_versions_table.py | 4 +- .../models/bert/modeling_tf_bert.py | 127 +++++++++-------- .../models/electra/modeling_tf_electra.py | 133 ++++++++++-------- .../longformer/modeling_tf_longformer.py | 31 ++-- .../models/mpnet/modeling_tf_mpnet.py | 90 ++++++------ .../models/roberta/modeling_tf_roberta.py | 133 ++++++++++-------- ...tf_{{cookiecutter.lowercase_modelname}}.py | 97 +++++++------ 8 files changed, 348 insertions(+), 271 deletions(-) diff --git a/setup.py b/setup.py index 2b025c221a65..860cb8e4a12d 100644 --- a/setup.py +++ b/setup.py @@ -127,8 +127,8 @@ "sphinx-rtd-theme==0.4.3", # sphinx-rtd-theme==0.5.0 introduced big changes in the style. 
"sphinx==3.2.1", "starlette", - "tensorflow-cpu>=2.0", - "tensorflow>=2.0", + "tensorflow-cpu>=2.3", + "tensorflow>=2.3", "timeout-decorator", "tokenizers==0.9.4", "torch>=1.0", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index c6901c198f71..b07c53058ff0 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -40,8 +40,8 @@ "sphinx-rtd-theme": "sphinx-rtd-theme==0.4.3", "sphinx": "sphinx==3.2.1", "starlette": "starlette", - "tensorflow-cpu": "tensorflow-cpu>=2.0", - "tensorflow": "tensorflow>=2.0", + "tensorflow-cpu": "tensorflow-cpu>=2.3", + "tensorflow": "tensorflow>=2.3", "timeout-decorator": "timeout-decorator", "tokenizers": "tokenizers==0.9.4", "torch": "torch>=1.0", diff --git a/src/transformers/models/bert/modeling_tf_bert.py b/src/transformers/models/bert/modeling_tf_bert.py index 987b1d9dc065..485639237fbe 100644 --- a/src/transformers/models/bert/modeling_tf_bert.py +++ b/src/transformers/models/bert/modeling_tf_bert.py @@ -246,52 +246,52 @@ def __init__(self, config, **kwargs): if config.hidden_size % config.num_attention_heads != 0: raise ValueError( - "The hidden size (%d) is not a multiple of the number of attention " - "heads (%d)" % (config.hidden_size, config.num_attention_heads) + f"The hidden size ({config.hidden_size}) is not a multiple of the number " + f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads - assert config.hidden_size % config.num_attention_heads == 0 self.attention_head_size = int(config.hidden_size / config.num_attention_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - self.query = tf.keras.layers.Dense( - self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" + self.query = tf.keras.layers.experimental.EinsumDense( + equation="abc,cde->abde", + output_shape=(None, config.num_attention_heads, self.attention_head_size), + bias_axes="de", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="query", ) - self.key = tf.keras.layers.Dense( - self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" + self.key = tf.keras.layers.experimental.EinsumDense( + equation="abc,cde->abde", + output_shape=(None, config.num_attention_heads, self.attention_head_size), + bias_axes="de", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="key", ) - self.value = tf.keras.layers.Dense( - self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" + self.value = tf.keras.layers.experimental.EinsumDense( + equation="abc,cde->abde", + output_shape=(None, config.num_attention_heads, self.attention_head_size), + bias_axes="de", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="value", ) - self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) - - def transpose_for_scores(self, x, batch_size): - x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) + self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob) - return tf.transpose(x, perm=[0, 2, 1, 3]) + def call(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, training=False): + query_layer = self.query(inputs=hidden_states) + key_layer = self.key(inputs=hidden_states) + value_layer = 
self.value(inputs=hidden_states) - def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False): - batch_size = shape_list(hidden_states)[0] - mixed_query_layer = self.query(hidden_states) - mixed_key_layer = self.key(hidden_states) - mixed_value_layer = self.value(hidden_states) - query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) - key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) - value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) - - # Take the dot product between "query" and "key" to get the raw attention scores. - attention_scores = tf.matmul( - query_layer, key_layer, transpose_b=True - ) # (batch size, num_heads, seq_len_q, seq_len_k) - dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype) # scale attention_scores - attention_scores = attention_scores / tf.math.sqrt(dk) + # Take the dot product between "query" and "key" to get the raw + # attention scores. + dk = tf.cast(x=self.attention_head_size, dtype=query_layer.dtype) + query_layer = tf.multiply(x=query_layer, y=tf.math.rsqrt(x=dk)) + attention_scores = tf.einsum("aecd,abcd->acbe", key_layer, query_layer) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFBertModel call() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. - attention_probs = tf.nn.softmax(attention_scores, axis=-1) + attention_probs = tf.nn.softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. @@ -299,14 +299,10 @@ def call(self, hidden_states, attention_mask, head_mask, output_attentions, trai # Mask heads if we want to if head_mask is not None: - attention_probs = attention_probs * head_mask + attention_scores = attention_scores * head_mask - context_layer = tf.matmul(attention_probs, value_layer) - context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) - context_layer = tf.reshape( - context_layer, (batch_size, -1, self.all_head_size) - ) # (batch_size, seq_len_q, all_head_size) - outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + attention_output = tf.einsum("acbe,aecd->abcd", attention_probs, value_layer) + outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) return outputs @@ -315,16 +311,29 @@ class TFBertSelfOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) - self.dense = tf.keras.layers.Dense( - config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number " + f"of attention heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + self.dense = tf.keras.layers.experimental.EinsumDense( + equation="abcd,cde->abe", + output_shape=(None, self.all_head_size), + bias_axes="e", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="dense", ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") - self.dropout = 
tf.keras.layers.Dropout(config.hidden_dropout_prob) + self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = self.LayerNorm(hidden_states + input_tensor) + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.dropout(inputs=hidden_states, training=training) + hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states @@ -353,18 +362,22 @@ class TFBertIntermediate(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) - self.dense = tf.keras.layers.Dense( - config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + self.dense = tf.keras.layers.experimental.EinsumDense( + equation="abc,cd->abd", + output_shape=(None, config.intermediate_size), + bias_axes="d", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="dense", ) if isinstance(config.hidden_act, str): - self.intermediate_act_fn = get_tf_activation(config.hidden_act) + self.intermediate_act_fn = get_tf_activation(activation_string=config.hidden_act) else: self.intermediate_act_fn = config.hidden_act def call(self, hidden_states): - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.intermediate_act_fn(inputs=hidden_states) return hidden_states @@ -373,16 +386,20 @@ class TFBertOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) - self.dense = tf.keras.layers.Dense( - config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + self.dense = tf.keras.layers.experimental.EinsumDense( + equation="abc,cd->abd", + bias_axes="d", + output_shape=(None, config.hidden_size), + kernel_initializer=get_initializer(config.initializer_range), + name="dense", ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") - self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = self.LayerNorm(hidden_states + input_tensor) + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.dropout(inputs=hidden_states, training=training) + hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states diff --git a/src/transformers/models/electra/modeling_tf_electra.py b/src/transformers/models/electra/modeling_tf_electra.py index 3a39b0376207..709b5f26d755 100644 --- a/src/transformers/models/electra/modeling_tf_electra.py +++ b/src/transformers/models/electra/modeling_tf_electra.py @@ -69,59 +69,59 @@ ] -# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention +# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->Electra class TFElectraSelfAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( - "The hidden size (%d) is not a multiple of the number of attention " - "heads (%d)" % 
(config.hidden_size, config.num_attention_heads) + f"The hidden size ({config.hidden_size}) is not a multiple of the number " + f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads - assert config.hidden_size % config.num_attention_heads == 0 self.attention_head_size = int(config.hidden_size / config.num_attention_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - self.query = tf.keras.layers.Dense( - self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" + self.query = tf.keras.layers.experimental.EinsumDense( + equation="abc,cde->abde", + output_shape=(None, config.num_attention_heads, self.attention_head_size), + bias_axes="de", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="query", ) - self.key = tf.keras.layers.Dense( - self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" + self.key = tf.keras.layers.experimental.EinsumDense( + equation="abc,cde->abde", + output_shape=(None, config.num_attention_heads, self.attention_head_size), + bias_axes="de", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="key", ) - self.value = tf.keras.layers.Dense( - self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" + self.value = tf.keras.layers.experimental.EinsumDense( + equation="abc,cde->abde", + output_shape=(None, config.num_attention_heads, self.attention_head_size), + bias_axes="de", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="value", ) - self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) - - def transpose_for_scores(self, x, batch_size): - x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) + self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob) - return tf.transpose(x, perm=[0, 2, 1, 3]) + def call(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, training=False): + query_layer = self.query(inputs=hidden_states) + key_layer = self.key(inputs=hidden_states) + value_layer = self.value(inputs=hidden_states) - def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False): - batch_size = shape_list(hidden_states)[0] - mixed_query_layer = self.query(hidden_states) - mixed_key_layer = self.key(hidden_states) - mixed_value_layer = self.value(hidden_states) - query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) - key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) - value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) - - # Take the dot product between "query" and "key" to get the raw attention scores. - attention_scores = tf.matmul( - query_layer, key_layer, transpose_b=True - ) # (batch size, num_heads, seq_len_q, seq_len_k) - dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype) # scale attention_scores - attention_scores = attention_scores / tf.math.sqrt(dk) + # Take the dot product between "query" and "key" to get the raw + # attention scores. 
+ dk = tf.cast(x=self.attention_head_size, dtype=query_layer.dtype) + query_layer = tf.multiply(x=query_layer, y=tf.math.rsqrt(x=dk)) + attention_scores = tf.einsum("aecd,abcd->acbe", key_layer, query_layer) if attention_mask is not None: - # Apply the attention mask is (precomputed for all layers in TFBertModel call() function) + # Apply the attention mask is (precomputed for all layers in TFElectraModel call() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. - attention_probs = tf.nn.softmax(attention_scores, axis=-1) + attention_probs = tf.nn.softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. @@ -129,33 +129,42 @@ def call(self, hidden_states, attention_mask, head_mask, output_attentions, trai # Mask heads if we want to if head_mask is not None: - attention_probs = attention_probs * head_mask + attention_scores = attention_scores * head_mask - context_layer = tf.matmul(attention_probs, value_layer) - context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) - context_layer = tf.reshape( - context_layer, (batch_size, -1, self.all_head_size) - ) # (batch_size, seq_len_q, all_head_size) - outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + attention_output = tf.einsum("acbe,aecd->abcd", attention_probs, value_layer) + outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) return outputs -# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput +# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Electra class TFElectraSelfOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) - self.dense = tf.keras.layers.Dense( - config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number " + f"of attention heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + self.dense = tf.keras.layers.experimental.EinsumDense( + equation="abcd,cde->abe", + output_shape=(None, self.all_head_size), + bias_axes="e", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="dense", ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") - self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = self.LayerNorm(hidden_states + input_tensor) + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.dropout(inputs=hidden_states, training=training) + hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states @@ -186,18 +195,22 @@ class TFElectraIntermediate(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) - self.dense = tf.keras.layers.Dense( - 
config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + self.dense = tf.keras.layers.experimental.EinsumDense( + equation="abc,cd->abd", + output_shape=(None, config.intermediate_size), + bias_axes="d", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="dense", ) if isinstance(config.hidden_act, str): - self.intermediate_act_fn = get_tf_activation(config.hidden_act) + self.intermediate_act_fn = get_tf_activation(activation_string=config.hidden_act) else: self.intermediate_act_fn = config.hidden_act def call(self, hidden_states): - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.intermediate_act_fn(inputs=hidden_states) return hidden_states @@ -207,16 +220,20 @@ class TFElectraOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) - self.dense = tf.keras.layers.Dense( - config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + self.dense = tf.keras.layers.experimental.EinsumDense( + equation="abc,cd->abd", + bias_axes="d", + output_shape=(None, config.hidden_size), + kernel_initializer=get_initializer(config.initializer_range), + name="dense", ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") - self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = self.LayerNorm(hidden_states + input_tensor) + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.dropout(inputs=hidden_states, training=training) + hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states diff --git a/src/transformers/models/longformer/modeling_tf_longformer.py b/src/transformers/models/longformer/modeling_tf_longformer.py index db30435be567..8f0d4fb91c22 100644 --- a/src/transformers/models/longformer/modeling_tf_longformer.py +++ b/src/transformers/models/longformer/modeling_tf_longformer.py @@ -618,18 +618,22 @@ class TFLongformerIntermediate(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) - self.dense = tf.keras.layers.Dense( - config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + self.dense = tf.keras.layers.experimental.EinsumDense( + equation="abc,cd->abd", + output_shape=(None, config.intermediate_size), + bias_axes="d", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="dense", ) if isinstance(config.hidden_act, str): - self.intermediate_act_fn = get_tf_activation(config.hidden_act) + self.intermediate_act_fn = get_tf_activation(activation_string=config.hidden_act) else: self.intermediate_act_fn = config.hidden_act def call(self, hidden_states): - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.intermediate_act_fn(inputs=hidden_states) return hidden_states @@ -639,16 +643,20 @@ class TFLongformerOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) - self.dense = 
tf.keras.layers.Dense( - config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + self.dense = tf.keras.layers.experimental.EinsumDense( + equation="abc,cd->abd", + bias_axes="d", + output_shape=(None, config.hidden_size), + kernel_initializer=get_initializer(config.initializer_range), + name="dense", ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") - self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = self.LayerNorm(hidden_states + input_tensor) + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.dropout(inputs=hidden_states, training=training) + hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states @@ -674,7 +682,6 @@ def call(self, hidden_states): return pooled_output -# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput class TFLongformerSelfOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) diff --git a/src/transformers/models/mpnet/modeling_tf_mpnet.py b/src/transformers/models/mpnet/modeling_tf_mpnet.py index 02f462572d41..23d3d45d6ed2 100644 --- a/src/transformers/models/mpnet/modeling_tf_mpnet.py +++ b/src/transformers/models/mpnet/modeling_tf_mpnet.py @@ -239,54 +239,58 @@ def __init__(self, config, **kwargs): if config.hidden_size % config.num_attention_heads != 0: raise ValueError( - "The hidden size (%d) is not a multiple of the number of attention " - "heads (%d)" % (config.hidden_size, config.num_attention_heads) + f"The hidden size ({config.hidden_size}) is not a multiple of the number " + f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads - assert config.hidden_size % config.num_attention_heads == 0 self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size - - self.q = tf.keras.layers.Dense( - self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="q" + self.q = tf.keras.layers.experimental.EinsumDense( + equation="abc,cde->abde", + output_shape=(None, config.num_attention_heads, self.attention_head_size), + bias_axes="de", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="q", ) - self.k = tf.keras.layers.Dense( - self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="k" + self.k = tf.keras.layers.experimental.EinsumDense( + equation="abc,cde->abde", + output_shape=(None, config.num_attention_heads, self.attention_head_size), + bias_axes="de", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="k", ) - self.v = tf.keras.layers.Dense( - self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="v" + self.v = tf.keras.layers.experimental.EinsumDense( + equation="abc,cde->abde", + output_shape=(None, config.num_attention_heads, self.attention_head_size), + bias_axes="de", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="v", ) - self.o = tf.keras.layers.Dense( - config.hidden_size, 
kernel_initializer=get_initializer(config.initializer_range), name="o" + self.o = tf.keras.layers.experimental.EinsumDense( + equation="abcd,cde->abe", + output_shape=(None, self.all_head_size), + bias_axes="e", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="o", ) self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) - def transpose_for_scores(self, x, batch_size): - x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) - - return tf.transpose(x, perm=[0, 2, 1, 3]) - def call(self, hidden_states, attention_mask, head_mask, output_attentions, position_bias=None, training=False): - batch_size = shape_list(hidden_states)[0] - q = self.q(hidden_states) k = self.k(hidden_states) v = self.v(hidden_states) - q = self.transpose_for_scores(q, batch_size) - k = self.transpose_for_scores(k, batch_size) - v = self.transpose_for_scores(v, batch_size) - - attention_scores = tf.matmul(q, k, transpose_b=True) - dk = tf.cast(shape_list(k)[-1], attention_scores.dtype) - attention_scores = attention_scores / tf.math.sqrt(dk) + dk = tf.cast(x=self.attention_head_size, dtype=q.dtype) + q = tf.multiply(x=q, y=tf.math.rsqrt(x=dk)) + attention_scores = tf.einsum("aecd,abcd->acbe", k, q) # Apply relative position embedding (precomputed in MPNetEncoder) if provided. if position_bias is not None: attention_scores += position_bias if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in TFMPNetModel call() function) attention_scores = attention_scores + attention_mask attention_probs = tf.nn.softmax(attention_scores, axis=-1) @@ -296,9 +300,7 @@ def call(self, hidden_states, attention_mask, head_mask, output_attentions, posi if head_mask is not None: attention_probs = attention_probs * head_mask - c = tf.matmul(attention_probs, v) - c = tf.transpose(c, perm=[0, 2, 1, 3]) - c = tf.reshape(c, (batch_size, -1, self.all_head_size)) + c = tf.einsum("acbe,aecd->abcd", attention_probs, v) o = self.o(c) outputs = (o, attention_probs) if output_attentions else (o,) @@ -330,18 +332,22 @@ class TFMPNetIntermediate(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) - self.dense = tf.keras.layers.Dense( - config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + self.dense = tf.keras.layers.experimental.EinsumDense( + equation="abc,cd->abd", + output_shape=(None, config.intermediate_size), + bias_axes="d", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="dense", ) if isinstance(config.hidden_act, str): - self.intermediate_act_fn = get_tf_activation(config.hidden_act) + self.intermediate_act_fn = get_tf_activation(activation_string=config.hidden_act) else: self.intermediate_act_fn = config.hidden_act def call(self, hidden_states): - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.intermediate_act_fn(inputs=hidden_states) return hidden_states @@ -351,16 +357,20 @@ class TFMPNetOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) - self.dense = tf.keras.layers.Dense( - config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + self.dense = tf.keras.layers.experimental.EinsumDense( + equation="abc,cd->abd", + bias_axes="d", + output_shape=(None, config.hidden_size), + 
kernel_initializer=get_initializer(config.initializer_range), + name="dense", ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") - self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = self.LayerNorm(hidden_states + input_tensor) + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.dropout(inputs=hidden_states, training=training) + hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states diff --git a/src/transformers/models/roberta/modeling_tf_roberta.py b/src/transformers/models/roberta/modeling_tf_roberta.py index ae5f3dd22393..a7c56b174607 100644 --- a/src/transformers/models/roberta/modeling_tf_roberta.py +++ b/src/transformers/models/roberta/modeling_tf_roberta.py @@ -243,59 +243,59 @@ def call(self, hidden_states): return pooled_output -# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention +# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->Roberta class TFRobertaSelfAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( - "The hidden size (%d) is not a multiple of the number of attention " - "heads (%d)" % (config.hidden_size, config.num_attention_heads) + f"The hidden size ({config.hidden_size}) is not a multiple of the number " + f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads - assert config.hidden_size % config.num_attention_heads == 0 self.attention_head_size = int(config.hidden_size / config.num_attention_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - self.query = tf.keras.layers.Dense( - self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" + self.query = tf.keras.layers.experimental.EinsumDense( + equation="abc,cde->abde", + output_shape=(None, config.num_attention_heads, self.attention_head_size), + bias_axes="de", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="query", ) - self.key = tf.keras.layers.Dense( - self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" + self.key = tf.keras.layers.experimental.EinsumDense( + equation="abc,cde->abde", + output_shape=(None, config.num_attention_heads, self.attention_head_size), + bias_axes="de", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="key", ) - self.value = tf.keras.layers.Dense( - self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" + self.value = tf.keras.layers.experimental.EinsumDense( + equation="abc,cde->abde", + output_shape=(None, config.num_attention_heads, self.attention_head_size), + bias_axes="de", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="value", ) - self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) - - def transpose_for_scores(self, x, batch_size): - x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) + self.dropout = 
tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob) - return tf.transpose(x, perm=[0, 2, 1, 3]) + def call(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, training=False): + query_layer = self.query(inputs=hidden_states) + key_layer = self.key(inputs=hidden_states) + value_layer = self.value(inputs=hidden_states) - def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False): - batch_size = shape_list(hidden_states)[0] - mixed_query_layer = self.query(hidden_states) - mixed_key_layer = self.key(hidden_states) - mixed_value_layer = self.value(hidden_states) - query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) - key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) - value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) - - # Take the dot product between "query" and "key" to get the raw attention scores. - attention_scores = tf.matmul( - query_layer, key_layer, transpose_b=True - ) # (batch size, num_heads, seq_len_q, seq_len_k) - dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype) # scale attention_scores - attention_scores = attention_scores / tf.math.sqrt(dk) + # Take the dot product between "query" and "key" to get the raw + # attention scores. + dk = tf.cast(x=self.attention_head_size, dtype=query_layer.dtype) + query_layer = tf.multiply(x=query_layer, y=tf.math.rsqrt(x=dk)) + attention_scores = tf.einsum("aecd,abcd->acbe", key_layer, query_layer) if attention_mask is not None: - # Apply the attention mask is (precomputed for all layers in TFBertModel call() function) + # Apply the attention mask is (precomputed for all layers in TFRobertaModel call() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. - attention_probs = tf.nn.softmax(attention_scores, axis=-1) + attention_probs = tf.nn.softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
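
[Editorial note, not part of the patch] The einsum equations introduced above ("aecd,abcd->acbe" for the scores and "acbe,aecd->abcd" for the context) are intended to reproduce the previous transpose_for_scores + matmul attention. The following is a minimal standalone check of that equivalence; the tensor names, toy shapes, and tolerance are illustrative assumptions only.

```python
# Minimal sketch: verify the einsum-based attention matches the old matmul path.
import numpy as np
import tensorflow as tf

batch, seq_len, num_heads, head_size = 2, 5, 4, 8
# q/k/v in the new layout (batch, seq, heads, head_size), i.e. what the
# "abc,cde->abde" EinsumDense projections produce.
q = tf.random.normal((batch, seq_len, num_heads, head_size))
k = tf.random.normal((batch, seq_len, num_heads, head_size))
v = tf.random.normal((batch, seq_len, num_heads, head_size))

# New formulation: pre-scale the query, contract over the head_size axis.
scale = tf.math.rsqrt(tf.cast(head_size, q.dtype))
scores_new = tf.einsum("aecd,abcd->acbe", k, q * scale)        # (batch, heads, q_len, k_len)
probs_new = tf.nn.softmax(scores_new, axis=-1)
context_new = tf.einsum("acbe,aecd->abcd", probs_new, v)       # (batch, q_len, heads, head_size)

# Old formulation: transpose to (batch, heads, seq, head_size), then matmul.
q_t, k_t, v_t = (tf.transpose(x, perm=[0, 2, 1, 3]) for x in (q, k, v))
scores_old = tf.matmul(q_t, k_t, transpose_b=True) * scale
probs_old = tf.nn.softmax(scores_old, axis=-1)
context_old = tf.transpose(tf.matmul(probs_old, v_t), perm=[0, 2, 1, 3])

np.testing.assert_allclose(scores_new.numpy(), scores_old.numpy(), atol=1e-4)
np.testing.assert_allclose(context_new.numpy(), context_old.numpy(), atol=1e-4)
```
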
@@ -303,33 +303,42 @@ def call(self, hidden_states, attention_mask, head_mask, output_attentions, trai # Mask heads if we want to if head_mask is not None: - attention_probs = attention_probs * head_mask + attention_scores = attention_scores * head_mask - context_layer = tf.matmul(attention_probs, value_layer) - context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) - context_layer = tf.reshape( - context_layer, (batch_size, -1, self.all_head_size) - ) # (batch_size, seq_len_q, all_head_size) - outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + attention_output = tf.einsum("acbe,aecd->abcd", attention_probs, value_layer) + outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) return outputs -# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput +# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Roberta class TFRobertaSelfOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) - self.dense = tf.keras.layers.Dense( - config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number " + f"of attention heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + self.dense = tf.keras.layers.experimental.EinsumDense( + equation="abcd,cde->abe", + output_shape=(None, self.all_head_size), + bias_axes="e", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="dense", ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") - self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = self.LayerNorm(hidden_states + input_tensor) + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.dropout(inputs=hidden_states, training=training) + hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states @@ -360,18 +369,22 @@ class TFRobertaIntermediate(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) - self.dense = tf.keras.layers.Dense( - config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + self.dense = tf.keras.layers.experimental.EinsumDense( + equation="abc,cd->abd", + output_shape=(None, config.intermediate_size), + bias_axes="d", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="dense", ) if isinstance(config.hidden_act, str): - self.intermediate_act_fn = get_tf_activation(config.hidden_act) + self.intermediate_act_fn = get_tf_activation(activation_string=config.hidden_act) else: self.intermediate_act_fn = config.hidden_act def call(self, hidden_states): - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) + hidden_states = self.dense(inputs=hidden_states) + hidden_states = 
self.intermediate_act_fn(inputs=hidden_states) return hidden_states @@ -381,16 +394,20 @@ class TFRobertaOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) - self.dense = tf.keras.layers.Dense( - config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + self.dense = tf.keras.layers.experimental.EinsumDense( + equation="abc,cd->abd", + bias_axes="d", + output_shape=(None, config.hidden_size), + kernel_initializer=get_initializer(config.initializer_range), + name="dense", ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") - self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = self.LayerNorm(hidden_states + input_tensor) + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.dropout(inputs=hidden_states, training=training) + hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py index 109b9f310b4e..5c8ffbfc4132 100644 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py @@ -191,52 +191,52 @@ def __init__(self, config, **kwargs): if config.hidden_size % config.num_attention_heads != 0: raise ValueError( - "The hidden size (%d) is not a multiple of the number of attention " - "heads (%d)" % (config.hidden_size, config.num_attention_heads) + f"The hidden size ({config.hidden_size}) is not a multiple of the number " + f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads - assert config.hidden_size % config.num_attention_heads == 0 self.attention_head_size = int(config.hidden_size / config.num_attention_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - self.query = tf.keras.layers.Dense( - self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" + self.query = tf.keras.layers.experimental.EinsumDense( + equation="abc,cde->abde", + output_shape=(None, config.num_attention_heads, self.attention_head_size), + bias_axes="de", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="query", ) - self.key = tf.keras.layers.Dense( - self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" + self.key = tf.keras.layers.experimental.EinsumDense( + equation="abc,cde->abde", + output_shape=(None, config.num_attention_heads, self.attention_head_size), + bias_axes="de", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="key", ) - self.value = tf.keras.layers.Dense( - self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" + self.value = tf.keras.layers.experimental.EinsumDense( + 
equation="abc,cde->abde", + output_shape=(None, config.num_attention_heads, self.attention_head_size), + bias_axes="de", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="value", ) - self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) - - def transpose_for_scores(self, x, batch_size): - x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) + self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob) - return tf.transpose(x, perm=[0, 2, 1, 3]) + def call(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, training=False): + query_layer = self.query(inputs=hidden_states) + key_layer = self.key(inputs=hidden_states) + value_layer = self.value(inputs=hidden_states) - def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False): - batch_size = shape_list(hidden_states)[0] - mixed_query_layer = self.query(hidden_states) - mixed_key_layer = self.key(hidden_states) - mixed_value_layer = self.value(hidden_states) - query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) - key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) - value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) - - # Take the dot product between "query" and "key" to get the raw attention scores. - attention_scores = tf.matmul( - query_layer, key_layer, transpose_b=True - ) # (batch size, num_heads, seq_len_q, seq_len_k) - dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype) # scale attention_scores - attention_scores = attention_scores / tf.math.sqrt(dk) + # Take the dot product between "query" and "key" to get the raw + # attention scores. + dk = tf.cast(x=self.attention_head_size, dtype=query_layer.dtype) + query_layer = tf.multiply(x=query_layer, y=tf.math.rsqrt(x=dk)) + attention_scores = tf.einsum("aecd,abcd->acbe", key_layer, query_layer) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TF{{cookiecutter.camelcase_modelname}}Model call() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. - attention_probs = tf.nn.softmax(attention_scores, axis=-1) + attention_probs = tf.nn.softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
@@ -244,14 +244,10 @@ def call(self, hidden_states, attention_mask, head_mask, output_attentions, trai # Mask heads if we want to if head_mask is not None: - attention_probs = attention_probs * head_mask + attention_scores = attention_scores * head_mask - context_layer = tf.matmul(attention_probs, value_layer) - context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) - context_layer = tf.reshape( - context_layer, (batch_size, -1, self.all_head_size) - ) # (batch_size, seq_len_q, all_head_size) - outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + attention_output = tf.einsum("acbe,aecd->abcd", attention_probs, value_layer) + outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) return outputs @@ -261,16 +257,29 @@ class TF{{cookiecutter.camelcase_modelname}}SelfOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) - self.dense = tf.keras.layers.Dense( - config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number " + f"of attention heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + self.dense = tf.keras.layers.experimental.EinsumDense( + equation="abcd,cde->abe", + output_shape=(None, self.all_head_size), + bias_axes="e", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="dense", ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") - self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = self.LayerNorm(hidden_states + input_tensor) + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.dropout(inputs=hidden_states, training=training) + hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states From 161a6461db3c673672edd5b994848b0d50db1b67 Mon Sep 17 00:00:00 2001 From: Julien Plu Date: Mon, 21 Dec 2020 13:52:16 +0100 Subject: [PATCH 5/8] Fix TF template (#9234) --- ...tf_{{cookiecutter.lowercase_modelname}}.py | 30 ++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py index 5c8ffbfc4132..5d505371d0d6 100644 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py @@ -310,18 +310,22 @@ class TF{{cookiecutter.camelcase_modelname}}Intermediate(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) - self.dense = tf.keras.layers.Dense( - 
config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + self.dense = tf.keras.layers.experimental.EinsumDense( + equation="abc,cd->abd", + output_shape=(None, config.intermediate_size), + bias_axes="d", + kernel_initializer=get_initializer(initializer_range=config.initializer_range), + name="dense", ) if isinstance(config.hidden_act, str): - self.intermediate_act_fn = get_tf_activation(config.hidden_act) + self.intermediate_act_fn = get_tf_activation(activation_string=config.hidden_act) else: self.intermediate_act_fn = config.hidden_act def call(self, hidden_states): - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.intermediate_act_fn(inputs=hidden_states) return hidden_states @@ -331,16 +335,20 @@ class TF{{cookiecutter.camelcase_modelname}}Output(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) - self.dense = tf.keras.layers.Dense( - config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + self.dense = tf.keras.layers.experimental.EinsumDense( + equation="abc,cd->abd", + bias_axes="d", + output_shape=(None, config.hidden_size), + kernel_initializer=get_initializer(config.initializer_range), + name="dense", ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") - self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = self.LayerNorm(hidden_states + input_tensor) + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.dropout(inputs=hidden_states, training=training) + hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states From 08abdabda140c3e9ce14e157fe0e482b605bb7d7 Mon Sep 17 00:00:00 2001 From: TobiasNorlund Date: Mon, 21 Dec 2020 14:05:23 +0100 Subject: [PATCH 6/8] Fixed beam search generation for GPT2 and T5 (#9219) --- src/transformers/generation_utils.py | 4 ++-- tests/test_modeling_common.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index 91cc97c95c1e..4c2f20f040b2 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -156,7 +156,7 @@ def _expand_inputs_for_generation( if is_encoder_decoder: assert encoder_outputs is not None encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.index_select( - 0, expanded_return_idx + 0, expanded_return_idx.to(encoder_outputs.last_hidden_state.device) ) model_kwargs["encoder_outputs"] = encoder_outputs return input_ids, model_kwargs @@ -226,7 +226,7 @@ def _reorder_cache(past: Tuple[torch.Tensor], beam_idx: torch.Tensor) -> Tuple[t For custom re-ordering of :obj:`past_key_values` or :obj:`mems`, the function should be implemented in subclasses of :class:`~transformers.PreTrainedModel`. 
""" - return tuple(layer_past.index_select(1, beam_idx) for layer_past in past) + return tuple(layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in past) def _get_logits_warper( self, top_k: int = None, top_p: float = None, temperature: float = None, num_beams: int = None diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 26fc6be67228..57b421c61085 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -1166,6 +1166,34 @@ def cast_to_device(dictionary, device): for value_, parallel_value_ in zip(value, parallel_value): self.assertTrue(torch.allclose(value_, parallel_value_.to("cpu"), atol=1e-7)) + @require_torch_multi_gpu + def test_model_parallel_beam_search(self): + if not self.test_model_parallel: + return + + all_generative_and_parallelizable_model_classes = tuple( + set(self.all_generative_model_classes).intersection(self.all_parallelizable_model_classes) + ) + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in all_generative_and_parallelizable_model_classes: + inputs_dict = self._prepare_for_class(inputs_dict, model_class) + model = model_class(config) + + def cast_to_device(dictionary, device): + output = {} + for k, v in dictionary.items(): + if isinstance(v, torch.Tensor): + output[k] = v.to(device) + else: + output[k] = v + + return output + + model.parallelize() + model.generate(**cast_to_device(inputs_dict, "cuda:0"), num_beams=2) + global_rng = random.Random() From f4432b7e01dc46008fb823096d884bdc2861b49c Mon Sep 17 00:00:00 2001 From: Suraj Patil Date: Mon, 21 Dec 2020 19:56:46 +0530 Subject: [PATCH 7/8] add base model classes to bart subclassed models (#9230) * add base model classes to bart subclassed models * add doc --- docs/source/model_doc/blenderbot.rst | 9 +++++++ docs/source/model_doc/mbart.rst | 7 +++++ docs/source/model_doc/pegasus.rst | 6 +++++ src/transformers/__init__.py | 10 ++++--- src/transformers/models/auto/modeling_auto.py | 10 ++++--- .../models/blenderbot/__init__.py | 6 ++++- .../models/blenderbot/modeling_blenderbot.py | 17 ++++++++++-- src/transformers/models/mbart/__init__.py | 2 +- .../models/mbart/modeling_mbart.py | 19 ++++++++++++- src/transformers/models/pegasus/__init__.py | 2 +- .../models/pegasus/modeling_pegasus.py | 26 +++++++++++++++++- src/transformers/utils/dummy_pt_objects.py | 27 +++++++++++++++++++ tests/test_modeling_blenderbot.py | 3 ++- tests/test_modeling_mbart.py | 3 ++- tests/test_modeling_pegasus.py | 4 +-- 15 files changed, 134 insertions(+), 17 deletions(-) diff --git a/docs/source/model_doc/blenderbot.rst b/docs/source/model_doc/blenderbot.rst index ddceeb81c1bf..df43c90ef076 100644 --- a/docs/source/model_doc/blenderbot.rst +++ b/docs/source/model_doc/blenderbot.rst @@ -100,6 +100,15 @@ BlenderbotSmallTokenizer :members: +BlenderbotModel +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +See :obj:`transformers.BartModel` for arguments to `forward` and `generate` + +.. 
autoclass:: transformers.BlenderbotModel + :members: + + BlenderbotForConditionalGeneration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/source/model_doc/mbart.rst b/docs/source/model_doc/mbart.rst index eb9b9798024d..4ac391255eb5 100644 --- a/docs/source/model_doc/mbart.rst +++ b/docs/source/model_doc/mbart.rst @@ -97,6 +97,13 @@ MBartTokenizerFast :members: +MBartModel +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: transformers.MBartModel + :members: + + MBartForConditionalGeneration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/source/model_doc/pegasus.rst b/docs/source/model_doc/pegasus.rst index 42b3e5ea57b6..3fab320ebcbc 100644 --- a/docs/source/model_doc/pegasus.rst +++ b/docs/source/model_doc/pegasus.rst @@ -119,6 +119,12 @@ PegasusTokenizerFast :members: +PegasusModel +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: transformers.PegasusModel + + PegasusForConditionalGeneration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 4586fe5363f3..580318abaa21 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -406,7 +406,11 @@ BertGenerationEncoder, load_tf_weights_in_bert_generation, ) - from .models.blenderbot import BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForConditionalGeneration + from .models.blenderbot import ( + BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, + BlenderbotForConditionalGeneration, + BlenderbotModel, + ) from .models.camembert import ( CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, CamembertForCausalLM, @@ -522,7 +526,7 @@ LxmertXLayer, ) from .models.marian import MarianMTModel - from .models.mbart import MBartForConditionalGeneration + from .models.mbart import MBartForConditionalGeneration, MBartModel from .models.mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings from .models.mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, @@ -559,7 +563,7 @@ OpenAIGPTPreTrainedModel, load_tf_weights_in_openai_gpt, ) - from .models.pegasus import PegasusForConditionalGeneration + from .models.pegasus import PegasusForConditionalGeneration, PegasusModel from .models.prophetnet import ( PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST, ProphetNetDecoder, diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 4b9141d02455..3fc5c702e7df 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -50,7 +50,7 @@ BertModel, ) from ..bert_generation.modeling_bert_generation import BertGenerationDecoder, BertGenerationEncoder -from ..blenderbot.modeling_blenderbot import BlenderbotForConditionalGeneration +from ..blenderbot.modeling_blenderbot import BlenderbotForConditionalGeneration, BlenderbotModel from ..camembert.modeling_camembert import ( CamembertForCausalLM, CamembertForMaskedLM, @@ -111,7 +111,7 @@ ) from ..lxmert.modeling_lxmert import LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel from ..marian.modeling_marian import MarianMTModel -from ..mbart.modeling_mbart import MBartForConditionalGeneration 
+from ..mbart.modeling_mbart import MBartForConditionalGeneration, MBartModel from ..mobilebert.modeling_mobilebert import ( MobileBertForMaskedLM, MobileBertForMultipleChoice, @@ -132,7 +132,7 @@ ) from ..mt5.modeling_mt5 import MT5ForConditionalGeneration, MT5Model from ..openai.modeling_openai import OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel -from ..pegasus.modeling_pegasus import PegasusForConditionalGeneration +from ..pegasus.modeling_pegasus import PegasusForConditionalGeneration, PegasusModel from ..prophetnet.modeling_prophetnet import ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel from ..rag.modeling_rag import ( # noqa: F401 - need to import all RagModels to be in globals() function RagModel, @@ -255,6 +255,10 @@ (RetriBertConfig, RetriBertModel), (MT5Config, MT5Model), (T5Config, T5Model), + (PegasusConfig, PegasusModel), + (MarianConfig, MarianMTModel), + (MBartConfig, MBartModel), + (BlenderbotConfig, BlenderbotModel), (DistilBertConfig, DistilBertModel), (AlbertConfig, AlbertModel), (CamembertConfig, CamembertModel), diff --git a/src/transformers/models/blenderbot/__init__.py b/src/transformers/models/blenderbot/__init__.py index fdcd990ff938..fccb38f80ac1 100644 --- a/src/transformers/models/blenderbot/__init__.py +++ b/src/transformers/models/blenderbot/__init__.py @@ -22,7 +22,11 @@ if is_torch_available(): - from .modeling_blenderbot import BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForConditionalGeneration + from .modeling_blenderbot import ( + BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, + BlenderbotForConditionalGeneration, + BlenderbotModel, + ) if is_tf_available(): from .modeling_tf_blenderbot import TFBlenderbotForConditionalGeneration diff --git a/src/transformers/models/blenderbot/modeling_blenderbot.py b/src/transformers/models/blenderbot/modeling_blenderbot.py index 1421a87ca9bf..2a370fbabf86 100644 --- a/src/transformers/models/blenderbot/modeling_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_blenderbot.py @@ -19,7 +19,7 @@ import torch from ...file_utils import add_start_docstrings -from ..bart.modeling_bart import BartForConditionalGeneration +from ..bart.modeling_bart import BartForConditionalGeneration, BartModel from .configuration_blenderbot import BlenderbotConfig @@ -39,7 +39,20 @@ @add_start_docstrings( - "The BART Model with a language modeling head. Can be used for summarization.", BLENDER_START_DOCSTRING + "The bare BlenderBot Model transformer outputting raw hidden-states without any specific head on top.", + BLENDER_START_DOCSTRING, +) +class BlenderbotModel(BartModel): + r""" + This class overrides :class:`~transformers.BartModel`. Please check the superclass for the appropriate + documentation alongside usage examples. + """ + + config_class = BlenderbotConfig + + +@add_start_docstrings( + "The BlenderBot Model with a language modeling head. 
Can be used for summarization.", BLENDER_START_DOCSTRING ) class BlenderbotForConditionalGeneration(BartForConditionalGeneration): """ diff --git a/src/transformers/models/mbart/__init__.py b/src/transformers/models/mbart/__init__.py index b98d22662503..2fa8876085ed 100644 --- a/src/transformers/models/mbart/__init__.py +++ b/src/transformers/models/mbart/__init__.py @@ -27,7 +27,7 @@ from .tokenization_mbart_fast import MBartTokenizerFast if is_torch_available(): - from .modeling_mbart import MBartForConditionalGeneration + from .modeling_mbart import MBartForConditionalGeneration, MBartModel if is_tf_available(): from .modeling_tf_mbart import TFMBartForConditionalGeneration diff --git a/src/transformers/models/mbart/modeling_mbart.py b/src/transformers/models/mbart/modeling_mbart.py index 9fca52c5495d..f4aa39b07514 100644 --- a/src/transformers/models/mbart/modeling_mbart.py +++ b/src/transformers/models/mbart/modeling_mbart.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ..bart.modeling_bart import BartForConditionalGeneration +from ..bart.modeling_bart import BartForConditionalGeneration, BartModel from .configuration_mbart import MBartConfig @@ -26,6 +26,23 @@ ] +class MBartModel(BartModel): + r""" + This class overrides :class:`~transformers.BartModel`. Please check the superclass for the appropriate + documentation alongside usage examples. + """ + + config_class = MBartConfig + _keys_to_ignore_on_load_missing = [ + "encoder.embed_positions.weight", + "decoder.embed_positions.weight", + ] + _keys_to_ignore_on_save = [ + "encoder.embed_positions.weight", + "decoder.embed_positions.weight", + ] + + class MBartForConditionalGeneration(BartForConditionalGeneration): r""" This class overrides :class:`~transformers.BartForConditionalGeneration`. Please check the superclass for the diff --git a/src/transformers/models/pegasus/__init__.py b/src/transformers/models/pegasus/__init__.py index e7cc0ce71bea..20d1c3872dc1 100644 --- a/src/transformers/models/pegasus/__init__.py +++ b/src/transformers/models/pegasus/__init__.py @@ -27,7 +27,7 @@ from .tokenization_pegasus_fast import PegasusTokenizerFast if is_torch_available(): - from .modeling_pegasus import PegasusForConditionalGeneration + from .modeling_pegasus import PegasusForConditionalGeneration, PegasusModel if is_tf_available(): from .modeling_tf_pegasus import TFPegasusForConditionalGeneration diff --git a/src/transformers/models/pegasus/modeling_pegasus.py b/src/transformers/models/pegasus/modeling_pegasus.py index 3e623a770406..c7fde4164330 100644 --- a/src/transformers/models/pegasus/modeling_pegasus.py +++ b/src/transformers/models/pegasus/modeling_pegasus.py @@ -16,10 +16,34 @@ from ...file_utils import add_start_docstrings -from ..bart.modeling_bart import BART_START_DOCSTRING, BartForConditionalGeneration +from ..bart.modeling_bart import BART_START_DOCSTRING, BartForConditionalGeneration, BartModel from .configuration_pegasus import PegasusConfig +@add_start_docstrings( + "The bare Pegasus Model transformer outputting raw hidden-states without any specific head on top.", + BART_START_DOCSTRING, +) +class PegasusModel(BartModel): + r""" + This class overrides :class:`~transformers.BartModel`. Please check the superclass for the appropriate + documentation alongside usage examples. 
+ """ + + config_class = PegasusConfig + _keys_to_ignore_on_load_missing = [ + r"final_logits_bias", + r"encoder\.version", + r"decoder\.version", + "encoder.embed_positions", + "decoder.embed_positions", + ] + _keys_to_ignore_on_save = [ + "encoder.embed_positions.weight", + "decoder.embed_positions.weight", + ] + + @add_start_docstrings("The Pegasus Model for summarization ", BART_START_DOCSTRING) class PegasusForConditionalGeneration(BartForConditionalGeneration): r""" diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 050c7ba4f90a..97669eff742b 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -600,6 +600,15 @@ def from_pretrained(self, *args, **kwargs): requires_pytorch(self) +class BlenderbotModel: + def __init__(self, *args, **kwargs): + requires_pytorch(self) + + @classmethod + def from_pretrained(self, *args, **kwargs): + requires_pytorch(self) + + CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None @@ -1297,6 +1306,15 @@ def from_pretrained(self, *args, **kwargs): requires_pytorch(self) +class MBartModel: + def __init__(self, *args, **kwargs): + requires_pytorch(self) + + @classmethod + def from_pretrained(self, *args, **kwargs): + requires_pytorch(self) + + class MMBTForClassification: def __init__(self, *args, **kwargs): requires_pytorch(self) @@ -1560,6 +1578,15 @@ def from_pretrained(self, *args, **kwargs): requires_pytorch(self) +class PegasusModel: + def __init__(self, *args, **kwargs): + requires_pytorch(self) + + @classmethod + def from_pretrained(self, *args, **kwargs): + requires_pytorch(self) + + PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/tests/test_modeling_blenderbot.py b/tests/test_modeling_blenderbot.py index b069ba6089bc..668569a59553 100644 --- a/tests/test_modeling_blenderbot.py +++ b/tests/test_modeling_blenderbot.py @@ -32,6 +32,7 @@ AutoTokenizer, BlenderbotConfig, BlenderbotForConditionalGeneration, + BlenderbotModel, BlenderbotSmallTokenizer, BlenderbotTokenizer, ) @@ -90,7 +91,7 @@ def prepare_config_and_inputs_for_common(self): class BlenderbotTesterMixin(ModelTesterMixin, unittest.TestCase): if is_torch_available(): all_generative_model_classes = (BlenderbotForConditionalGeneration,) - all_model_classes = (BlenderbotForConditionalGeneration,) + all_model_classes = (BlenderbotForConditionalGeneration, BlenderbotModel) else: all_generative_model_classes = () all_model_classes = () diff --git a/tests/test_modeling_mbart.py b/tests/test_modeling_mbart.py index 1a4094ed2ce9..2a43650febbd 100644 --- a/tests/test_modeling_mbart.py +++ b/tests/test_modeling_mbart.py @@ -30,6 +30,7 @@ BatchEncoding, MBartConfig, MBartForConditionalGeneration, + MBartModel, ) @@ -59,7 +60,7 @@ def prepare_config_and_inputs_for_common(self): @require_torch class SelectiveCommonTest(unittest.TestCase): - all_model_classes = (MBartForConditionalGeneration,) if is_torch_available() else () + all_model_classes = (MBartForConditionalGeneration, MBartModel) if is_torch_available() else () test_save_load__keys_to_ignore_on_save = ModelTesterMixin.test_save_load__keys_to_ignore_on_save diff --git a/tests/test_modeling_pegasus.py b/tests/test_modeling_pegasus.py index 42173ebccfbb..dc9fdf522547 100644 --- a/tests/test_modeling_pegasus.py +++ b/tests/test_modeling_pegasus.py @@ -26,7 +26,7 @@ if is_torch_available(): - from transformers import AutoModelForSeq2SeqLM, PegasusConfig, PegasusForConditionalGeneration + from transformers import AutoModelForSeq2SeqLM, 
PegasusConfig, PegasusForConditionalGeneration, PegasusModel XSUM_ENTRY_LONGER = """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning 'Oh I think you're nominated'", said Dappy."And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around."At the end of the day we're grateful to be where we are in our careers."If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" 
""" @@ -55,7 +55,7 @@ def prepare_config_and_inputs_for_common(self): @require_torch class SelectiveCommonTest(unittest.TestCase): - all_model_classes = (PegasusForConditionalGeneration,) if is_torch_available() else () + all_model_classes = (PegasusForConditionalGeneration, PegasusModel) if is_torch_available() else () test_save_load__keys_to_ignore_on_save = ModelTesterMixin.test_save_load__keys_to_ignore_on_save From 9a12b9696fca52c71601b59a73c8e18426519027 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Mon, 21 Dec 2020 15:41:34 +0100 Subject: [PATCH 8/8] [MPNet] Add slow to fast tokenizer converter (#9233) * add converter * delet unnecessary comments --- src/transformers/convert_slow_tokenizer.py | 64 ++++++++++++++-------- tests/test_tokenization_mpnet.py | 5 +- 2 files changed, 44 insertions(+), 25 deletions(-) diff --git a/src/transformers/convert_slow_tokenizer.py b/src/transformers/convert_slow_tokenizer.py index b1b3408acb6a..2c0f9f7f2d07 100644 --- a/src/transformers/convert_slow_tokenizer.py +++ b/src/transformers/convert_slow_tokenizer.py @@ -74,18 +74,6 @@ def converted(self) -> Tokenizer: vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) - # # Let the tokenizer know about special tokens if they are part of the vocab - # if tokenizer.token_to_id(str(self.original_tokenizer.unk_token)) is not None: - # tokenizer.add_special_tokens([str(self.original_tokenizer.unk_token)]) - # if tokenizer.token_to_id(str(self.original_tokenizer.sep_token)) is not None: - # tokenizer.add_special_tokens([str(self.original_tokenizer.sep_token)]) - # if tokenizer.token_to_id(str(self.original_tokenizer.cls_token)) is not None: - # tokenizer.add_special_tokens([str(self.original_tokenizer.cls_token)]) - # if tokenizer.token_to_id(str(self.original_tokenizer.pad_token)) is not None: - # tokenizer.add_special_tokens([str(self.original_tokenizer.pad_token)]) - # if tokenizer.token_to_id(str(self.original_tokenizer.mask_token)) is not None: - # tokenizer.add_special_tokens([str(self.original_tokenizer.mask_token)]) - tokenize_chinese_chars = False strip_accents = False do_lower_case = False @@ -125,18 +113,6 @@ def converted(self) -> Tokenizer: vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) - # # Let the tokenizer know about special tokens if they are part of the vocab - # if tokenizer.token_to_id(str(self.original_tokenizer.unk_token)) is not None: - # tokenizer.add_special_tokens([str(self.original_tokenizer.unk_token)]) - # if tokenizer.token_to_id(str(self.original_tokenizer.sep_token)) is not None: - # tokenizer.add_special_tokens([str(self.original_tokenizer.sep_token)]) - # if tokenizer.token_to_id(str(self.original_tokenizer.cls_token)) is not None: - # tokenizer.add_special_tokens([str(self.original_tokenizer.cls_token)]) - # if tokenizer.token_to_id(str(self.original_tokenizer.pad_token)) is not None: - # tokenizer.add_special_tokens([str(self.original_tokenizer.pad_token)]) - # if tokenizer.token_to_id(str(self.original_tokenizer.mask_token)) is not None: - # tokenizer.add_special_tokens([str(self.original_tokenizer.mask_token)]) - tokenize_chinese_chars = False strip_accents = False do_lower_case = False @@ -171,6 +147,45 @@ def converted(self) -> Tokenizer: return tokenizer +class MPNetConverter(Converter): + def converted(self) -> Tokenizer: + vocab = self.original_tokenizer.vocab + tokenizer = Tokenizer(WordPiece(vocab, 
unk_token=str(self.original_tokenizer.unk_token))) + + tokenize_chinese_chars = False + strip_accents = False + do_lower_case = False + if hasattr(self.original_tokenizer, "basic_tokenizer"): + tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars + strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents + do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case + + tokenizer.normalizer = normalizers.BertNormalizer( + clean_text=True, + handle_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + lowercase=do_lower_case, + ) + tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() + + cls = str(self.original_tokenizer.cls_token) + sep = str(self.original_tokenizer.sep_token) + cls_token_id = self.original_tokenizer.cls_token_id + sep_token_id = self.original_tokenizer.sep_token_id + + tokenizer.post_processor = processors.TemplateProcessing( + single=f"{cls}:0 $A:0 {sep}:0", + pair=f"{cls}:0 $A:0 {sep}:0 {sep}:0 $B:1 {sep}:1", # MPNet uses two [SEP] tokens + special_tokens=[ + (cls, cls_token_id), + (sep, sep_token_id), + ], + ) + tokenizer.decoder = decoders.WordPiece(prefix="##") + + return tokenizer + + class OpenAIGPTConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.encoder @@ -602,6 +617,7 @@ def post_processor(self): "LongformerTokenizer": RobertaConverter, "LxmertTokenizer": BertConverter, "MBartTokenizer": MBartConverter, + "MPNetTokenizer": MPNetConverter, "MobileBertTokenizer": BertConverter, "OpenAIGPTTokenizer": OpenAIGPTConverter, "PegasusTokenizer": PegasusConverter, diff --git a/tests/test_tokenization_mpnet.py b/tests/test_tokenization_mpnet.py index 2a4f26ff95c5..733b2891f876 100644 --- a/tests/test_tokenization_mpnet.py +++ b/tests/test_tokenization_mpnet.py @@ -17,6 +17,7 @@ import os import unittest +from transformers import MPNetTokenizerFast from transformers.models.mpnet.tokenization_mpnet import VOCAB_FILES_NAMES, MPNetTokenizer from transformers.testing_utils import require_tokenizers, slow @@ -27,7 +28,9 @@ class MPNetTokenizerTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = MPNetTokenizer - test_rust_tokenizer = False + rust_tokenizer_class = MPNetTokenizerFast + test_rust_tokenizer = True + space_between_special_tokens = True def setUp(self): super().setUp()
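
[Editorial note, not part of the patch] The TemplateProcessing post-processor built by the new MPNetConverter wraps a sentence pair as `<s> A </s> </s> B </s>`, i.e. with two separator tokens between the segments. The sketch below shows that behavior with the `tokenizers` library directly, using a toy vocabulary with assumed token ids.

```python
# Minimal sketch of the MPNet-style post-processor (toy vocab, assumed ids).
from tokenizers import Tokenizer, models, pre_tokenizers, processors

vocab = {"<s>": 0, "</s>": 1, "[UNK]": 2, "hello": 3, "world": 4}
tok = Tokenizer(models.WordPiece(vocab, unk_token="[UNK]"))
tok.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
tok.post_processor = processors.TemplateProcessing(
    single="<s>:0 $A:0 </s>:0",
    pair="<s>:0 $A:0 </s>:0 </s>:0 $B:1 </s>:1",   # two [SEP]-style tokens between segments
    special_tokens=[("<s>", 0), ("</s>", 1)],
)

enc = tok.encode("hello", "world")
print(enc.tokens)     # ['<s>', 'hello', '</s>', '</s>', 'world', '</s>']
print(enc.type_ids)   # [0, 0, 0, 0, 1, 1]
```
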