diff --git a/docs/hub/mistral.md b/docs/hub/mistral.md
new file mode 100644
index 000000000..eed449336
--- /dev/null
+++ b/docs/hub/mistral.md
@@ -0,0 +1,74 @@
+---
+draft: False
+date: 2024-02-26
+slug: mistral
+tags:
+  - patching
+authors:
+  - shanktt
+---
+
+# Structured Outputs with Mistral Large
+
+If you want to try this example using `instructor hub`, you can pull it by running
+
+```bash
+instructor hub pull --slug mistral --py > mistral_example.py
+```
+
+Mistral Large is the flagship model from Mistral AI, supporting a 32k context window and function calling abilities. Mistral Large's addition of [function calling](https://docs.mistral.ai/guides/function-calling/) makes it possible to obtain structured outputs using JSON schema.
+
+By the end of this blog post, you will learn how to effectively utilize Instructor with Mistral Large.
+
+
+
+## Patching
+
+Instructor's patch enhances the Mistral API with the following features:
+
+- `response_model` in `create` calls that returns a Pydantic model
+- `max_retries` in `create` calls that retries failed calls with a backoff strategy
+
+!!! note "Learn More"
+
+    To learn more, please refer to the [docs](../index.md). To understand the benefits of using Pydantic with Instructor, visit the tips and tricks section of the [why use Pydantic](../why.md) page.
+
+## Mistral Client
+
+Mistral uses a different client than OpenAI, so the patching process differs slightly from the other examples.
+
+!!! note "Getting access"
+
+    If you want to try this out for yourself, check out the [Mistral AI](https://mistral.ai/) website. You can get started [here](https://docs.mistral.ai/).
+
+
+```python
+from pydantic import BaseModel
+from mistralai.client import MistralClient
+from instructor.patch import patch
+from instructor.function_calls import Mode
+
+
+class UserDetails(BaseModel):
+    name: str
+    age: int
+
+
+# enables `response_model` in the chat call
+client = MistralClient()
+patched_chat = patch(create=client.chat, mode=Mode.MISTRAL_TOOLS)
+
+resp = patched_chat(
+    model="mistral-large-latest",
+    response_model=UserDetails,
+    messages=[
+        {
+            "role": "user",
+            "content": 'Extract the following entities: "Jason is 20"',
+        },
+    ],
+)
+print(resp)
+#> name='Jason' age=20
+```
+
diff --git a/examples/mistral/mistral.py b/examples/mistral/mistral.py
new file mode 100644
index 000000000..25fcf09df
--- /dev/null
+++ b/examples/mistral/mistral.py
@@ -0,0 +1,26 @@
+from pydantic import BaseModel
+from mistralai.client import MistralClient
+from instructor.patch import patch
+from instructor.function_calls import Mode
+
+
+class UserDetails(BaseModel):
+    name: str
+    age: int
+
+
+# enables `response_model` in the chat call
+client = MistralClient()
+patched_chat = patch(create=client.chat, mode=Mode.MISTRAL_TOOLS)
+
+resp = patched_chat(
+    model="mistral-large-latest",
+    response_model=UserDetails,
+    messages=[
+        {
+            "role": "user",
+            "content": 'Extract the following entities: "Jason is 20"',
+        },
+    ],
+)
+print(resp)
\ No newline at end of file
diff --git a/instructor/function_calls.py b/instructor/function_calls.py
index f24f9a59d..ce5467f67 100644
--- a/instructor/function_calls.py
+++ b/instructor/function_calls.py
@@ -15,6 +15,7 @@ class Mode(enum.Enum):
     FUNCTIONS: str = "function_call"
     PARALLEL_TOOLS: str = "parallel_tool_call"
     TOOLS: str = "tool_call"
+    MISTRAL_TOOLS: str = "mistral_tools"
     JSON: str = "json_mode"
     MD_JSON: str = "markdown_json_mode"
     JSON_SCHEMA: str = "json_schema_mode"
@@ -114,7 +115,7 @@ def from_response(
                 context=validation_context,
                 strict=strict,
             )
-        elif mode == Mode.TOOLS:
+        elif mode in {Mode.TOOLS, Mode.MISTRAL_TOOLS}:
             assert (
                 len(message.tool_calls) == 1
             ), "Instructor does not support multiple tool calls, use List[Model] instead."
diff --git a/instructor/patch.py b/instructor/patch.py
index 158fa84e9..fe4c1f6be 100644
--- a/instructor/patch.py
+++ b/instructor/patch.py
@@ -116,17 +116,20 @@ def handle_response_model(
     if mode == Mode.FUNCTIONS:
         new_kwargs["functions"] = [response_model.openai_schema]  # type: ignore
         new_kwargs["function_call"] = {"name": response_model.openai_schema["name"]}  # type: ignore
-    elif mode == Mode.TOOLS:
+    elif mode in {Mode.TOOLS, Mode.MISTRAL_TOOLS}:
         new_kwargs["tools"] = [
             {
                 "type": "function",
                 "function": response_model.openai_schema,
             }
         ]
-        new_kwargs["tool_choice"] = {
-            "type": "function",
-            "function": {"name": response_model.openai_schema["name"]},
-        }
+        if mode == Mode.MISTRAL_TOOLS:
+            new_kwargs["tool_choice"] = "any"
+        else:
+            new_kwargs["tool_choice"] = {
+                "type": "function",
+                "function": {"name": response_model.openai_schema["name"]},
+            }
     elif mode in {Mode.JSON, Mode.MD_JSON, Mode.JSON_SCHEMA}:
         # If its a JSON Mode we need to massage the prompt a bit
         # in order to get the response we want in a json format
diff --git a/mkdocs.yml b/mkdocs.yml
index 2941293ec..dc5947cc5 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -177,7 +177,7 @@ nav:
     - Action Items: 'hub/action_items.md'
     - Partial Streaming: 'hub/partial_streaming.md'
     - Extract Contact Info: 'hub/extract_contact_info.md'
-
+    - Using Mistral Large: 'hub/mistral.md'
  - Tutorials:
    - Tutorials (Notebooks): 'tutorials/1-introduction.ipynb'
    - Tips and Tricks: 'tutorials/2-tips.ipynb'
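
The new doc page advertises `max_retries`, but neither snippet in this patch exercises it. Below is a minimal sketch of how it could be combined with the `MISTRAL_TOOLS` mode, assuming the same patched-client setup as the example above; the `StrictUserDetails` model, the uppercase validator, and `max_retries=2` are illustrative and not part of this patch.

```python
from pydantic import BaseModel, field_validator
from mistralai.client import MistralClient
from instructor.patch import patch
from instructor.function_calls import Mode


class StrictUserDetails(BaseModel):
    name: str
    age: int

    @field_validator("name")
    @classmethod
    def name_is_uppercase(cls, v: str) -> str:
        # Illustrative constraint: if the model returns a lowercase name, the
        # validation error is fed back to the model and the call is retried,
        # up to `max_retries` times.
        if not v.isupper():
            raise ValueError("name must be uppercase")
        return v


client = MistralClient()
patched_chat = patch(create=client.chat, mode=Mode.MISTRAL_TOOLS)

resp = patched_chat(
    model="mistral-large-latest",
    response_model=StrictUserDetails,
    max_retries=2,  # re-ask the model when validation fails
    messages=[
        {
            "role": "user",
            "content": 'Extract the following entities: "Jason is 20"',
        },
    ],
)
print(resp)
# Expected output would look something like: name='JASON' age=20
```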