From f729288fefab8799eabac56efbb4896eb8c75bc9 Mon Sep 17 00:00:00 2001
From: Kenneth Enevoldsen
Date: Sat, 17 Feb 2024 13:14:32 +0100
Subject: [PATCH] style: ran linting

---
 src/seb/registered_models/e5_mistral.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/src/seb/registered_models/e5_mistral.py b/src/seb/registered_models/e5_mistral.py
index 18fb2996..9e89804f 100644
--- a/src/seb/registered_models/e5_mistral.py
+++ b/src/seb/registered_models/e5_mistral.py
@@ -121,8 +121,6 @@ def preprocess(self, sentences: Sequence[str], instruction: str, encode_type: En
         ]
 
         batch_dict = self.tokenizer.pad(batch_dict, padding=True, return_attention_mask=True, return_tensors="pt")
-
-
         return batch_dict.to(self.model.device)
 
     # but it does not work slightly better than this:
@@ -157,7 +155,6 @@ def encode(
         else:
             instruction = ""
         for batch in tqdm(batched(sentences, batch_size)):
-
             with torch.inference_mode():
                 batch_dict = self.preprocess(batch, instruction=instruction, encode_type=encode_type)
                 outputs = self.model(**batch_dict)
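
For context, the hunks above touch two patterns: padding a tokenized batch into a single
tensor dict in preprocess(), and running the encode() loop batch by batch under
torch.inference_mode() so no gradients are tracked. Below is a minimal standalone sketch
of that pattern, not the repository's implementation: the checkpoint name, the batched()
helper, and the mean pooling are illustrative assumptions.

# Minimal sketch of batched encoding as in the patched file (assumptions noted inline).
from itertools import islice
from typing import Iterable, Iterator

import torch
from transformers import AutoModel, AutoTokenizer


def batched(iterable: Iterable[str], n: int) -> Iterator[list[str]]:
    # Yield successive lists of up to n items; stand-in for the repo's batched() helper.
    it = iter(iterable)
    while batch := list(islice(it, n)):
        yield batch


# Assumed checkpoint, inferred from the filename e5_mistral.py; not confirmed by the patch.
name = "intfloat/e5-mistral-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModel.from_pretrained(name)

sentences = ["query: hello world", "query: hej verden"]
embeddings = []
for batch in batched(sentences, 2):
    # Tokenize and pad the batch to a uniform length, mirroring the
    # self.tokenizer.pad(...) call in preprocess().
    batch_dict = tokenizer(batch, padding=True, truncation=True, return_tensors="pt")
    batch_dict = {k: v.to(model.device) for k, v in batch_dict.items()}
    with torch.inference_mode():  # encoding only, so skip gradient tracking
        outputs = model(**batch_dict)
    # Mean-pool the last hidden state over non-padding tokens; the pooling
    # choice is an assumption here, not taken from the patch.
    mask = batch_dict["attention_mask"].unsqueeze(-1)
    pooled = (outputs.last_hidden_state * mask).sum(1) / mask.sum(1)
    embeddings.append(pooled)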