From 93fcff38a4f2075296d4a2da33e9be682b9e1734 Mon Sep 17 00:00:00 2001
From: Tvrtko Sternak <117077296+sternakt@users.noreply.github.com>
Date: Thu, 20 Feb 2025 11:13:39 +0100
Subject: [PATCH 1/2] Support for loading response format as a pydantic model
 from configuration files (#1023)

* WIP

* Implement loading response_format from json configuration

* Add tests

* Text tweaks to notebook

Signed-off-by: Mark Sze

* Fix configuration example in notebook

* WIP: pass json structured output to clients

* Add anthropic structured output from json schema

* Test ollama model

* Cleanup

* Add tests for openai

* Add tests for anthropic and deepseek

* Clean notebooks

* Fix anthropic structured output tests

* Replace issubclass with isinstance

* Remove deepseek from structured output tests

* Add list of credentials that fully support structured output

* Revert renaming

* Revert renaming

* Polish

* Replace anthropic with gemini in structured output tests

---------

Signed-off-by: Mark Sze
Co-authored-by: Mark Sze
---
 .secrets.baseline                             |  38 +--
 autogen/oai/anthropic.py                      |  20 +-
 autogen/oai/client.py                         |  27 +-
 autogen/oai/gemini.py                         |  14 +-
 autogen/oai/ollama.py                         |  13 +-
 autogen/oai/openai_utils.py                   |   4 +
 ...tchat_structured_outputs_from_config.ipynb | 245 ++++++++++++++++++
 test/agentchat/test_structured_output.py      |  75 ++++--
 test/conftest.py                              |   1 -
 test/oai/test_utils.py                        |   6 +-
 10 files changed, 384 insertions(+), 59 deletions(-)
 create mode 100644 notebook/agentchat_structured_outputs_from_config.ipynb

diff --git a/.secrets.baseline b/.secrets.baseline
index f23ccea0ed..b2b53c9ec2 100644
--- a/.secrets.baseline
+++ b/.secrets.baseline
@@ -143,7 +143,7 @@
         "filename": "autogen/oai/openai_utils.py",
         "hashed_secret": "aa5bc2e0df7182f74186f26d6e9063b9d57603ec",
         "is_verified": false,
-        "line_number": 352,
+        "line_number": 353,
         "is_secret": false
       },
       {
@@ -151,7 +151,7 @@
         "filename": "autogen/oai/openai_utils.py",
         "hashed_secret": "cbb43d092552e9af4b21efc76bc8c49c071c1d81",
         "is_verified": false,
-        "line_number": 353,
+        "line_number": 354,
         "is_secret": false
       },
       {
@@ -159,7 +159,7 @@
         "filename": "autogen/oai/openai_utils.py",
         "hashed_secret": "79d8b9da0f827f788759bdbe5b9254a02c74d877",
         "is_verified": false,
-        "line_number": 573,
+        "line_number": 577,
         "is_secret": false
       }
     ],
@@ -1035,7 +1035,7 @@
         "filename": "test/oai/test_utils.py",
         "hashed_secret": "f72c85879027f6160ce36e1c5074ef8207bfe105",
         "is_verified": false,
-        "line_number": 26,
+        "line_number": 30,
         "is_secret": false
       },
       {
@@ -1043,7 +1043,7 @@
         "filename": "test/oai/test_utils.py",
         "hashed_secret": "4c88039c5079180dacb0e29d715055d95b2b7589",
         "is_verified": false,
-        "line_number": 35,
+        "line_number": 39,
         "is_secret": false
       },
       {
@@ -1051,7 +1051,7 @@
         "filename": "test/oai/test_utils.py",
         "hashed_secret": "7460e665be1988cc62f1caf9d47716b07d55858c",
         "is_verified": false,
-        "line_number": 65,
+        "line_number": 69,
         "is_secret": false
       },
       {
@@ -1059,7 +1059,7 @@
         "filename": "test/oai/test_utils.py",
         "hashed_secret": "b5c2827eb65bf13b87130e7e3c424ba9ff07cd67",
         "is_verified": false,
-        "line_number": 72,
+        "line_number": 76,
         "is_secret": false
       },
       {
@@ -1067,7 +1067,7 @@
         "filename": "test/oai/test_utils.py",
         "hashed_secret": "178c7a21b087dfafc826a21b61aff284c71fd258",
         "is_verified": false,
-        "line_number": 198,
+        "line_number": 202,
         "is_secret": false
       },
       {
@@ -1075,7 +1075,7 @@
         "filename": "test/oai/test_utils.py",
         "hashed_secret": "aa5c90e1b80bb987f562ac30eaa1a71c832892f5",
         "is_verified": false,
-        "line_number": 199,
+        "line_number": 203,
         "is_secret": false
       },
       {
@@ -1083,7 +1083,7 @@
        "filename": "test/oai/test_utils.py",
        "hashed_secret": "4489f55309f29853a4075cbbdf1f18b584809726",
        "is_verified": false,
-       "line_number": 201,
+       "line_number": 205,
        "is_secret": false
      },
      {
@@ -1091,7 +1091,7 @@
        "filename": "test/oai/test_utils.py",
        "hashed_secret": "95cfb33d5e102631e226e7ff9da4b17d6ba5f3e4",
        "is_verified": false,
-       "line_number": 213,
+       "line_number": 217,
        "is_secret": false
      },
      {
@@ -1099,7 +1099,7 @@
        "filename": "test/oai/test_utils.py",
        "hashed_secret": "7943297a6a2188abe697bd1e0189fdd1274818be",
        "is_verified": false,
-       "line_number": 215,
+       "line_number": 219,
        "is_secret": false
      },
      {
@@ -1107,7 +1107,7 @@
        "filename": "test/oai/test_utils.py",
        "hashed_secret": "8cc86c45479a8e0bbb1ddea57d3e195b611241f2",
        "is_verified": false,
-       "line_number": 235,
+       "line_number": 239,
        "is_secret": false
      },
      {
@@ -1115,7 +1115,7 @@
        "filename": "test/oai/test_utils.py",
        "hashed_secret": "eda6571eea7bd0ac4553ac9d745631f1f2bec7a4",
        "is_verified": false,
-       "line_number": 237,
+       "line_number": 241,
        "is_secret": false
      },
      {
@@ -1123,7 +1123,7 @@
        "filename": "test/oai/test_utils.py",
        "hashed_secret": "0ad02c88ffd9754bfbfc24ade0bf8bc48d76b232",
        "is_verified": false,
-       "line_number": 246,
+       "line_number": 250,
        "is_secret": false
      },
      {
@@ -1131,7 +1131,7 @@
        "filename": "test/oai/test_utils.py",
        "hashed_secret": "11841233da3f9f37c5fa14e8b482dde913db6edf",
        "is_verified": false,
-       "line_number": 254,
+       "line_number": 258,
        "is_secret": false
      },
      {
@@ -1139,7 +1139,7 @@
        "filename": "test/oai/test_utils.py",
        "hashed_secret": "11cac88cbfa53881646b024097f531c4f234151b",
        "is_verified": false,
-       "line_number": 432,
+       "line_number": 436,
        "is_secret": false
      },
      {
@@ -1147,7 +1147,7 @@
        "filename": "test/oai/test_utils.py",
        "hashed_secret": "8e8324e8ea2ec13efb774680c6e3850625e575e6",
        "is_verified": false,
-       "line_number": 432,
+       "line_number": 436,
        "is_secret": false
      },
      {
@@ -1155,7 +1155,7 @@
        "filename": "test/oai/test_utils.py",
        "hashed_secret": "8e2fa04ab430ff4817e87e3294f33727fc78ed6c",
        "is_verified": false,
-       "line_number": 435,
+       "line_number": 439,
        "is_secret": false
      }
    ],
diff --git a/autogen/oai/anthropic.py b/autogen/oai/anthropic.py
index 9cbe02bc93..a1d1e95003 100644
--- a/autogen/oai/anthropic.py
+++ b/autogen/oai/anthropic.py
@@ -384,7 +384,10 @@ def _add_response_format_to_system(self, params: dict[str, Any]):
             return

         # Get the schema of the Pydantic model
-        schema = self._response_format.model_json_schema()
+        if isinstance(self._response_format, dict):
+            schema = self._response_format
+        else:
+            schema = self._response_format.model_json_schema()

         # Add instructions for JSON formatting
         format_content = f"""Please provide your response as a JSON object that matches the following schema:
@@ -425,16 +428,25 @@ def _extract_json_response(self, response: Message) -> Any:
         json_str = content[json_start : json_end + 1]

         try:
-            # Parse JSON and validate against the Pydantic model
+            # Parse JSON and validate against the Pydantic model if Pydantic model was provided
             json_data = json.loads(json_str)
-            return self._response_format.model_validate(json_data)
+            if isinstance(self._response_format, dict):
+                return json_str
+            else:
+                return self._response_format.model_validate(json_data)
+
         except Exception as e:
             raise ValueError(f"Failed to parse response as valid JSON matching the schema for Structured Output: {e!s}")


 def _format_json_response(response: Any) -> str:
     """Formats the JSON response for structured outputs using the format method if it exists."""
-    return response.format() if isinstance(response, FormatterProtocol) else response
+    if isinstance(response, str):
+        return response
+    elif isinstance(response, FormatterProtocol):
+        return response.format()
+    else:
+        return response.model_dump_json()


 @require_optional_import("anthropic", "anthropic")
diff --git a/autogen/oai/client.py b/autogen/oai/client.py
index 2e64e9ff46..d31a09ea36 100644
--- a/autogen/oai/client.py
+++ b/autogen/oai/client.py
@@ -53,6 +53,7 @@
 if openai.__version__ >= "1.1.0":
     TOOL_ENABLED = True
     ERROR = None
+    from openai.lib._pydantic import _ensure_strict_json_schema
 else:
     ERROR: Optional[ImportError] = ImportError("Please install openai>=1 and diskcache to use autogen.OpenAIWrapper.")
     OpenAI = object
@@ -258,7 +259,9 @@ def __init__(self, config):
 class OpenAIClient:
     """Follows the Client protocol and wraps the OpenAI client."""

-    def __init__(self, client: Union[OpenAI, AzureOpenAI], response_format: Optional[BaseModel] = None):
+    def __init__(
+        self, client: Union[OpenAI, AzureOpenAI], response_format: Union[BaseModel, dict[str, Any], None] = None
+    ):
         self._oai_client = client
         self.response_format = response_format
         if (
@@ -395,9 +398,23 @@ def create(self, params: dict[str, Any]) -> ChatCompletion:
             def _create_or_parse(*args, **kwargs):
                 if "stream" in kwargs:
                     kwargs.pop("stream")
-                kwargs["response_format"] = type_to_response_format_param(
-                    self.response_format or params["response_format"]
-                )
+
+                if isinstance(kwargs["response_format"], dict):
+                    kwargs["response_format"] = {
+                        "type": "json_schema",
+                        "json_schema": {
+                            "schema": _ensure_strict_json_schema(
+                                kwargs["response_format"], path=(), root=kwargs["response_format"]
+                            ),
+                            "name": "response_format",
+                            "strict": True,
+                        },
+                    }
+                else:
+                    kwargs["response_format"] = type_to_response_format_param(
+                        self.response_format or params["response_format"]
+                    )
+
                 return self._oai_client.chat.completions.create(*args, **kwargs)

             create_or_parse = _create_or_parse
@@ -987,7 +1004,7 @@ def yes_or_no_filter(context, response):
                     **params,
                     **{"response_format": json.dumps(TypeAdapter(params["response_format"]).json_schema())},
                 }
-                if "response_format" in params
+                if "response_format" in params and not isinstance(params["response_format"], dict)
                 else params
             )
             request_ts = get_current_ts()
diff --git a/autogen/oai/gemini.py b/autogen/oai/gemini.py
index a61c202bcb..208b147475 100644
--- a/autogen/oai/gemini.py
+++ b/autogen/oai/gemini.py
@@ -252,7 +252,12 @@ def create(self, params: dict) -> ChatCompletion:
             self._response_format = params.get("response_format")
             generation_config["response_mime_type"] = "application/json"
-            response_schema = dict(jsonref.replace_refs(params.get("response_format").model_json_schema()))
+            response_format_schema_raw = params.get("response_format")
+
+            if isinstance(response_format_schema_raw, dict):
+                response_schema = dict(jsonref.replace_refs(response_format_schema_raw))
+            else:
+                response_schema = dict(jsonref.replace_refs(params.get("response_format").model_json_schema()))
             if "$defs" in response_schema:
                 response_schema.pop("$defs")
             generation_config["response_schema"] = response_schema
@@ -571,9 +576,12 @@ def _convert_json_response(self, response: str) -> Any:
             return response

         try:
-            # Parse JSON and validate against the Pydantic model
+            # Parse JSON and validate against the Pydantic model if Pydantic model was provided
             json_data = json.loads(response)
-            return self._response_format.model_validate(json_data)
+            if isinstance(self._response_format, dict):
+                return json_data
+            else:
+                return self._response_format.model_validate(json_data)

         except Exception as e:
             raise ValueError(f"Failed to parse response as valid JSON matching the schema for Structured Output: {e!s}")
diff --git a/autogen/oai/ollama.py b/autogen/oai/ollama.py
index 137e1867e7..ecabcdf0e5 100644
--- a/autogen/oai/ollama.py
+++ b/autogen/oai/ollama.py
@@ -237,7 +237,11 @@ def create(self, params: dict) -> ChatCompletion:
         # https://ollama.com/blog/structured-outputs
         if params.get("response_format"):
             self._response_format = params["response_format"]
-            ollama_params["format"] = params.get("response_format").model_json_schema()
+            ollama_params["format"] = (
+                params.get("response_format").model_json_schema()
+                if isinstance(self._response_format, BaseModel)
+                else params.get("response_format")
+            )

         # Token counts will be returned
         prompt_tokens = 0
@@ -491,8 +495,11 @@ def _convert_json_response(self, response: str) -> Any:
             return response

         try:
-            # Parse JSON and validate against the Pydantic model
-            return self._response_format.model_validate_json(response)
+            # Parse JSON and validate against the Pydantic model if Pydantic model was provided
+            if isinstance(self._response_format, dict):
+                return response
+            else:
+                return self._response_format.model_validate_json(response)
         except Exception as e:
             raise ValueError(f"Failed to parse response as valid JSON matching the schema for Structured Output: {e!s}")

diff --git a/autogen/oai/openai_utils.py b/autogen/oai/openai_utils.py
index bf476c421d..b391a5d081 100644
--- a/autogen/oai/openai_utils.py
+++ b/autogen/oai/openai_utils.py
@@ -4,6 +4,7 @@
 #
 # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
 # SPDX-License-Identifier: MIT
+import importlib
 import importlib.metadata
 import json
 import logging
@@ -554,6 +555,9 @@ def config_list_from_json(

     with open(config_list_path) as json_file:
         config_list = json.load(json_file)
+
+    config_list = filter_config(config_list, filter_dict)
+
     return filter_config(config_list, filter_dict)


diff --git a/notebook/agentchat_structured_outputs_from_config.ipynb b/notebook/agentchat_structured_outputs_from_config.ipynb
new file mode 100644
index 0000000000..9288db8de9
--- /dev/null
+++ b/notebook/agentchat_structured_outputs_from_config.ipynb
@@ -0,0 +1,245 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Structured output from JSON configuration\n",
+    "\n",
+    "Various LLM providers offer functionality for defining the structure of the messages generated by LLMs, and AG2 enables this by propagating the `response_format` field of your agents' LLM configuration to the underlying client.\n",
+    "\n",
+    "You can define the JSON structure of the output in the `response_format` field of the LLM configuration.\n",
+    "\n",
+    "To assist in determining the JSON structure, you can generate a valid schema using `.model_json_schema()` on a predefined Pydantic model; for more info, see [here](https://docs.pydantic.dev/latest/concepts/json_schema/). Your schema should be [OpenAPI specification](https://github.com/OAI/OpenAPI-Specification) compliant and must define a **title** field for the root model, which will be loaded as the `response_format` for the agent.\n",
+    "\n",
+    "For more info on structured outputs, see [our documentation](https://docs.ag2.ai/docs/user-guide/basic-concepts/structured-outputs).\n",
+    "\n",
+    "\n",
+    "````{=mdx}\n",
+    ":::info Requirements\n",
+    "Install `ag2`:\n",
+    "```bash\n",
+    "pip install ag2\n",
+    "```\n",
+    "\n",
+    "> **Note:** If you have been using `autogen` or `pyautogen`, all you need to do is upgrade it using:\n",
+    "> ```bash\n",
+    "> pip install -U autogen\n",
+    "> ```\n",
+    "> or\n",
+    "> ```bash\n",
+    "> pip install -U pyautogen\n",
+    "> ```\n",
+    "> as `pyautogen`, `autogen`, and `ag2` are aliases for the same PyPI package.\n",
+    "\n",
+    "\n",
+    "For more information, please refer to the [installation guide](/docs/user-guide/basic-concepts/installing-ag2).\n",
+    ":::\n",
+    "````"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Supported clients\n",
+    "AG2 has structured output support for the following client providers:\n",
+    "- OpenAI (`openai`)\n",
+    "- Anthropic (`anthropic`)\n",
+    "- Google Gemini (`google`)\n",
+    "- Ollama (`ollama`)"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Set your API Endpoint\n",
+    "\n",
+    "The [`config_list_from_json`](https://docs.ag2.ai/docs/api-reference/autogen/config_list_from_json#config-list-from-json) function loads a list of configurations from an environment variable or a JSON file.\n",
+    "\n",
+    "Here is an example of a configuration using the `gpt-4o-mini` model that will use a `MathReasoning` response format. To use it, paste it into your `OAI_CONFIG_LIST` file and set the `api_key` to your OpenAI API key.\n",
+    "\n",
+    "```json\n",
+    "[\n",
+    "    {\n",
+    "        \"model\": \"gpt-4o-mini\",\n",
+    "        \"api_key\": \"\",\n",
+    "        \"response_format\": {\n",
+    "            \"$defs\":{\n",
+    "                \"Step\":{\n",
+    "                    \"properties\":{\n",
+    "                        \"explanation\":{\n",
+    "                            \"title\":\"Explanation\",\n",
+    "                            \"type\":\"string\"\n",
+    "                        },\n",
+    "                        \"output\":{\n",
+    "                            \"title\":\"Output\",\n",
+    "                            \"type\":\"string\"\n",
+    "                        }\n",
+    "                    },\n",
+    "                    \"required\":[\n",
+    "                        \"explanation\",\n",
+    "                        \"output\"\n",
+    "                    ],\n",
+    "                    \"title\":\"Step\",\n",
+    "                    \"type\":\"object\"\n",
+    "                }\n",
+    "            },\n",
+    "            \"properties\":{\n",
+    "                \"steps\":{\n",
+    "                    \"items\":{\n",
+    "                        \"$ref\":\"#/$defs/Step\"\n",
+    "                    },\n",
+    "                    \"title\":\"Steps\",\n",
+    "                    \"type\":\"array\"\n",
+    "                },\n",
+    "                \"final_answer\":{\n",
+    "                    \"title\":\"Final Answer\",\n",
+    "                    \"type\":\"string\"\n",
+    "                }\n",
+    "            },\n",
+    "            \"required\":[\n",
+    "                \"steps\",\n",
+    "                \"final_answer\"\n",
+    "            ],\n",
+    "            \"title\":\"MathReasoning\",\n",
+    "            \"type\":\"object\"\n",
+    "        },\n",
+    "        \"tags\": [\"gpt-4o-mini-response-format\"]\n",
+    "    }\n",
+    "]\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import autogen\n",
+    "\n",
+    "# Load the configuration including the response format\n",
+    "config_list = autogen.config_list_from_json(\n",
+    "    \"OAI_CONFIG_LIST\",\n",
+    "    filter_dict={\n",
+    "        \"tags\": [\"gpt-4o-mini-response-format\"],\n",
+    "    },\n",
+    ")\n",
+    "\n",
+    "# Output the configuration, showing that it matches the configuration file.\n",
+    "config_list"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "````{=mdx}\n",
+    ":::tip\n",
"Learn more about configuring LLMs for agents [here](/docs/topics/llm_configuration).\n", + ":::\n", + "````" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example: math reasoning\n", + "\n", + "Using structured output, we can enforce chain-of-thought reasoning in the model to output an answer in a structured, step-by-step way." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Define chat actors\n", + "\n", + "Now we can define the agents that will solve the posed math problem. \n", + "We will keep this example simple; we will use a `UserProxyAgent` to input the math problem and an `AssistantAgent` to solve it.\n", + "\n", + "The `AssistantAgent` will be constrained to solving the math problem step-by-step by using the `MathReasoning` response format we defined above.\n", + "\n", + "The `response_format` is added to the LLM configuration and then this configuration is applied to the agent." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "llm_config = {\n", + " \"config_list\": config_list,\n", + " \"cache_seed\": 42,\n", + "}\n", + "\n", + "user_proxy = autogen.UserProxyAgent(\n", + " name=\"User_proxy\",\n", + " system_message=\"A human admin.\",\n", + " human_input_mode=\"NEVER\",\n", + ")\n", + "\n", + "assistant = autogen.AssistantAgent(\n", + " name=\"Math_solver\",\n", + " llm_config=llm_config, # Response Format is in the configuration\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Start the chat\n", + "\n", + "Let's now start the chat and prompt the assistant to solve a simple equation. The assistant agent should return a response solving the equation using a step-by-step `MathReasoning` model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "summary = user_proxy.initiate_chat(\n", + " assistant, message=\"how can I solve 8x + 7 = -23\", max_turns=1, summary_method=\"last_msg\"\n", + ").summary\n", + "\n", + "summary" + ] + } + ], + "metadata": { + "front_matter": { + "description": "OpenAI offers a functionality for defining a structure of the messages generated by LLMs, AutoGen enables this functionality by propagating response_format passed to your agents to the underlying client.", + "tags": [ + "structured output" + ] + }, + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/test/agentchat/test_structured_output.py b/test/agentchat/test_structured_output.py index ad3e21baa0..9e5a6f5d71 100644 --- a/test/agentchat/test_structured_output.py +++ b/test/agentchat/test_structured_output.py @@ -13,21 +13,50 @@ import autogen -from ..conftest import Credentials - - -@pytest.mark.openai -def test_structured_output(credentials_gpt_4o: Credentials): - class ResponseModel(BaseModel): - question: str - short_answer: str - reasoning: str - difficulty: float - - config_list = credentials_gpt_4o.config_list +from ..conftest import ( + Credentials, + credentials_gemini_flash, + credentials_gpt_4o_mini, + suppress_gemini_resource_exhausted, +) + +credentials_structured_output = [ + pytest.param( + credentials_gpt_4o_mini.__name__, + marks=pytest.mark.openai, + ), + pytest.param( + credentials_gemini_flash.__name__, + marks=pytest.mark.gemini, + ), +] + + +class ResponseModel(BaseModel): + question: str + short_answer: str + reasoning: str + difficulty: float + + +@pytest.mark.parametrize( + "credentials_from_test_param", + credentials_structured_output, + indirect=True, +) +@pytest.mark.parametrize( + "response_format", + [ + ResponseModel, + ResponseModel.model_json_schema(), + ], +) +@suppress_gemini_resource_exhausted +def test_structured_output(credentials_from_test_param, response_format): + config_list = credentials_from_test_param.config_list for config in config_list: - config["response_format"] = ResponseModel + config["response_format"] = response_format llm_config = {"config_list": config_list, "cache_seed": 43} @@ -55,20 +84,20 @@ class ResponseModel(BaseModel): raise AssertionError(f"Agent did not return a structured report. 
Exception: {e}") -@pytest.mark.openai -def test_global_structured_output(credentials_gpt_4o: Credentials): - class ResponseModel(BaseModel): - question: str - short_answer: str - reasoning: str - difficulty: float - - config_list = credentials_gpt_4o.config_list +@pytest.mark.parametrize( + "credentials_from_test_param", + credentials_structured_output, + indirect=True, +) +@pytest.mark.parametrize("response_format", [ResponseModel, ResponseModel.model_json_schema()]) +@suppress_gemini_resource_exhausted +def test_structured_output_global(credentials_from_test_param, response_format): + config_list = credentials_from_test_param.config_list llm_config = { "config_list": config_list, "cache_seed": 43, - "response_format": ResponseModel, + "response_format": response_format, } user_proxy = autogen.UserProxyAgent( diff --git a/test/conftest.py b/test/conftest.py index 05cf881257..9897475424 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -398,7 +398,6 @@ def user_proxy() -> UserProxyAgent: ), ] - T = TypeVar("T", bound=Callable[..., Any]) diff --git a/test/oai/test_utils.py b/test/oai/test_utils.py index fe0376a5ed..7aa8871d58 100755 --- a/test/oai/test_utils.py +++ b/test/oai/test_utils.py @@ -17,7 +17,11 @@ import pytest import autogen -from autogen.oai.openai_utils import DEFAULT_AZURE_API_VERSION, filter_config, is_valid_api_key +from autogen.oai.openai_utils import ( + DEFAULT_AZURE_API_VERSION, + filter_config, + is_valid_api_key, +) from ..conftest import MOCK_OPEN_AI_API_KEY From 5ac5fe1ae44dd10721c761b0f584faa44e580d80 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Thu, 20 Feb 2025 16:09:42 +0530 Subject: [PATCH 2/2] Update PR comments automatically after 30 reports (#1066) --- .github/workflows/notify-codecov.yml | 32 ---------------------------- codecov.yml | 6 +++--- 2 files changed, 3 insertions(+), 35 deletions(-) delete mode 100644 .github/workflows/notify-codecov.yml diff --git a/.github/workflows/notify-codecov.yml b/.github/workflows/notify-codecov.yml deleted file mode 100644 index 800addd6f8..0000000000 --- a/.github/workflows/notify-codecov.yml +++ /dev/null @@ -1,32 +0,0 @@ -name: Notify Codecov - -on: - workflow_run: - workflows: - - "Contrib tests with LLMs" - - "Contrib tests without LLMs" - - "Core tests with LLMs" - - "Core tests without LLMs" - - "Integration tests" - - "Test with optional dependencies" - branches: - - '*' - types: - - completed - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }} - cancel-in-progress: true -permissions: {} - -jobs: - notify: - runs-on: ubuntu-latest - # if: github.event.workflow_run.conclusion == 'success' - steps: - - name: Send Codecov notification - uses: codecov/codecov-action@v5 - with: - run_command: send-notifications - token: ${{ secrets.CODECOV_TOKEN }} - fail_ci_if_error: false diff --git a/codecov.yml b/codecov.yml index 90f04c129a..49283f0463 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,9 +1,9 @@ codecov: require_ci_to_pass: yes notify: - manual_trigger: true - # after_n_builds: 30 - # wait_for_ci: yes + # manual_trigger: true + after_n_builds: 30 + wait_for_ci: yes coverage: status: