From abdb8d64952b962917a8124993cfb3e385c32caf Mon Sep 17 00:00:00 2001
From: Matt Buchovecky
Date: Thu, 22 Aug 2024 16:20:53 -0700
Subject: [PATCH] fix: use proper eval default main eval metrics for text pair regressor

---
 flair/models/pairwise_regression_model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/flair/models/pairwise_regression_model.py b/flair/models/pairwise_regression_model.py
index c67d81ca16..c3f34e0f69 100644
--- a/flair/models/pairwise_regression_model.py
+++ b/flair/models/pairwise_regression_model.py
@@ -283,7 +283,7 @@ def evaluate(
         out_path: Union[str, Path, None] = None,
         embedding_storage_mode: EmbeddingStorageMode = "none",
         mini_batch_size: int = 32,
-        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
+        main_evaluation_metric: Tuple[str, str] = ("correlation", "pearson"),
         exclude_labels: Optional[List[str]] = None,
         gold_label_dictionary: Optional[Dictionary] = None,
         return_loss: bool = True,
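
The one-line change above only swaps the default value of main_evaluation_metric in TextPairRegressor.evaluate from a classification metric to a regression one. The sketch below shows what that means at call time; it is a minimal illustration, not code from the patch, and the model path, corpus object, and "similarity" gold label type are assumed placeholders.

# Minimal sketch of the changed default; placeholders are marked in comments.
from flair.models import TextPairRegressor

# Assumes a trained text pair regression model saved on disk (hypothetical path).
model = TextPairRegressor.load("resources/pair-regressor/final-model.pt")

# Placeholder: any flair text-pair regression corpus with a test split.
corpus = ...

result = model.evaluate(
    corpus.test,
    gold_label_type="similarity",  # assumed label type for this sketch
    mini_batch_size=32,
    # Before this patch, omitting main_evaluation_metric fell back to
    # ("micro avg", "f1-score"), a classification metric that a regression
    # evaluation does not produce. With the patch, the default is
    # ("correlation", "pearson"), so no override is needed here.
)
print(result.main_score)  # Pearson correlation under the new default

# Passing the metric explicitly was and remains possible; the patch only
# changes what happens when the argument is omitted:
result = model.evaluate(
    corpus.test,
    gold_label_type="similarity",
    main_evaluation_metric=("correlation", "pearson"),
)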