diff --git a/src/huggingface_hub/repocard_data.py b/src/huggingface_hub/repocard_data.py
index 902da3f3e3..0daac082c4 100644
--- a/src/huggingface_hub/repocard_data.py
+++ b/src/huggingface_hub/repocard_data.py
@@ -42,15 +42,15 @@ class EvalResult:
             Example: 5503434ddd753f426f4b38109466949a1217c2bb
         dataset_args (`Dict[str, Any]`, *optional*):
             The arguments passed during `Metric.compute()`. Example for `bleu`: `{"max_order": 4}`
-        metric_name: (`str`, *optional*):
+        metric_name (`str`, *optional*):
             A pretty name for the metric. Example: "Test WER".
-        metric_config: (`str`, *optional*):
+        metric_config (`str`, *optional*):
             The name of the metric configuration used in `load_metric()`.
             Example: bleurt-large-512 in `load_metric("bleurt", "bleurt-large-512")`.
             See the `datasets` docs for more info: https://huggingface.co/docs/datasets/v2.1.0/en/loading#load-configurations
-        metric_args: (`Dict[str, Any]`, *optional*):
+        metric_args (`Dict[str, Any]`, *optional*):
             The arguments passed during `Metric.compute()`. Example for `bleu`: max_order: 4
-        verified: (`bool`, *optional*):
+        verified (`bool`, *optional*):
             If true, indicates that evaluation was generated by Hugging Face (vs. self-reported).
     """
 
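For reference, the entries touched here document the optional metadata fields of `EvalResult`. A minimal usage sketch follows (illustrative only: the required fields `task_type`, `dataset_type`, `dataset_name`, `metric_type`, and `metric_value` come from the same class, and all concrete values below are made up, reusing the `bleu`/`max_order` example from the docstring):

from huggingface_hub import EvalResult

# Hypothetical values; only the field names are taken from the class above.
result = EvalResult(
    task_type="translation",
    dataset_type="wmt16",
    dataset_name="WMT16 (ro-en)",
    metric_type="bleu",
    metric_value=28.1,
    metric_name="Test BLEU",       # pretty display name for the metric
    metric_args={"max_order": 4},  # arguments passed during Metric.compute()
    verified=False,                # self-reported, not generated by Hugging Face
)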