Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add template for multi_tool_use #299

Open
wants to merge 5 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions functionary/prompt_template/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,9 @@
from functionary.prompt_template.llava_prompt_template import LlavaLlama
from functionary.prompt_template.prompt_template_v1 import PromptTemplateV1
from functionary.prompt_template.prompt_template_v2 import PromptTemplateV2
from functionary.prompt_template.llama31_multi_tool_use_template import (
MultiToolUseLlama31Template,
)


def get_available_prompt_template_versions() -> List[PromptTemplate]:
Expand All @@ -28,6 +31,7 @@ def get_available_prompt_template_versions() -> List[PromptTemplate]:
# directly add LLavaLlama as it is not a direct subclass of PromptTemplate but the subclass of: Llama3TemplateV3
# we don't use get_prompt_template or this will return the parent class
all_templates_obj.append(LlavaLlama.get_prompt_template())
all_templates_obj.append(MultiToolUseLlama31Template.get_prompt_template())

return all_templates_obj

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
{# version=v3-llama3.1 #}{%- if not tools is defined -%}
{%- set tools = none -%}
{%- endif -%}

{#- Split out the code_interpreter pseudo-tool: it is advertised via the
    "Environment: ipython" header rather than the function list below.
    NOTE(review): assumes `tools` is a list when supplied — an explicit
    none would break the selectattr; confirm callers always pass a list. -#}
{%- set has_code_interpreter = tools | selectattr("type", "equalto", "code_interpreter") | list | length > 0 -%}
{%- if has_code_interpreter -%}
{%- set tools = tools | rejectattr("type", "equalto", "code_interpreter") | list -%}
{%- endif -%}

{#- System message + builtin tools #}
{{- bos_token + "<|start_header_id|>system<|end_header_id|>\n\n" }}
{%- if has_code_interpreter %}
{{- "Environment: ipython\n\n" }}
{%- else -%}
{{ "\n"}}
{%- endif %}
{{- "Cutting Knowledge Date: December 2023\n\n" }}
{#- Function list: tools may be either OpenAI-style ({"type": ..., "function": {...}})
    or bare function dicts ({"name": ..., "description": ...}) -#}
{%- if tools %}
{{- "\nYou have access to the following functions:\n\n" }}
{%- for t in tools %}
{%- if "type" in t -%}
{{ "Use the function '" + t["function"]["name"] + "' to '" + t["function"]["description"] + "'\n" + t["function"] | tojson() }}
{%- else -%}
{{ "Use the function '" + t["name"] + "' to '" + t["description"] + "'\n" + t | tojson }}
{%- endif -%}
{{- "\n\n" }}
{%- endfor %}
{{- '\nThink very carefully before calling functions.\nIf a you choose to call a function ONLY reply in the following format:\n{start_tag}={function_name}>{parameters}{end_tag}\nwhere\n\nstart_tag => `<function`\nparameters => a JSON dict with the function argument name as key and function argument value as value.\nend_tag => `</function>`\n\nHere is an example,\n<function=example_function_name>{"example_name": "example_value"}</function>\n\nReminder:\n- Function calls MUST follow the specified format, start with <function= and end with </function>\n- Required parameters MUST be specified\n- Only call one function at a time\n- Put the entire function call reply on one line\n\n' -}}
{%- endif %}
{{- "<|eot_id|>" -}}

{#- Render conversation turns; tool results are rendered under the ipython role -#}
{%- for message in messages -%}
{%- if message['role'] == 'user' or message['role'] == 'system' -%}
{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] + '<|eot_id|>' }}
{%- elif message['role'] == 'tool' -%}
{{ '<|start_header_id|>ipython<|end_header_id|>\n\n' + message['content'] + '<|eot_id|>' }}
{%- else -%}
{%- if (message['content'] and message['content']|length > 0) or ('tool_calls' in message and message['tool_calls'] and message['tool_calls']|length > 0) -%}
{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'}}
{%- endif -%}
{%- if message['content'] and message['content']|length > 0 -%}
{{ message['content'] }}
{%- endif -%}
{%- if 'tool_calls' in message and message['tool_calls'] and message['tool_calls']|length > 0 -%}
{%- for tool_call in message['tool_calls'] -%}
{#- Code-interpreter calls use the builtin <|python_tag|> form; all other
    calls use the <function=name>args</function> tag format -#}
{%- if tool_call["function"]["name"] == "python" -%}
{{ '<|python_tag|>' + tool_call['function']['arguments'] }}
{%- else -%}
{{ '<function=' + tool_call['function']['name'] + '>' + tool_call['function']['arguments'] + '</function>' }}
{%- endif -%}
{%- endfor -%}
{{ '<|eom_id|>' }}
{%- elif message['content'] and message['content']|length > 0 -%}
{{ '<|eot_id|>' }}
{%- endif -%}
{%- endif -%}
{%- endfor -%}
{%- if add_generation_prompt -%}
{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif -%}
151 changes: 151 additions & 0 deletions functionary/prompt_template/llama31_multi_tool_use_template.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,151 @@
import datetime
import json
import re
from typing import Any, Dict, List, Literal, Optional, Tuple, Union

from functionary.openai_types import Function, Tool
from functionary.prompt_template import prompt_utils
from functionary.prompt_template.llama31_prompt_template import Llama31Template


def return_multi_tool_use():
    """Build the JSON-schema definition of the synthetic ``multi_tool_use`` tool.

    ``multi_tool_use`` wraps several parallel function calls into a single
    tool call whose ``tool_uses`` array carries one entry per wrapped call.

    Returns:
        dict: an OpenAI-style tool definition (``{"type": "function", ...}``).
    """
    # Schema of a single entry in the "tool_uses" array.
    tool_use_item = {
        "type": "object",
        "properties": {
            "recipient_name": {
                "type": "string",
                "description": "The name of the tool to use. The format should either be just the name of the tool, or in the format namespace.function_name for plugin and function tools.",
            },
            "parameters": {
                "type": "object",
                "additionalProperties": True,
                "description": "The parameters to pass to the tool. Ensure these are valid according to the tool's own specifications.",
            },
        },
        "required": ["recipient_name", "parameters"],
    }
    # Top-level parameter schema: one required array of tool uses.
    parameters_schema = {
        "type": "object",
        "properties": {
            "tool_uses": {
                "type": "array",
                "description": "The tools to be executed in parallel. NOTE: only functions tools are permitted",
                "items": tool_use_item,
            },
        },
        "required": ["tool_uses"],
    }
    return {
        "type": "function",
        "function": {
            "name": "multi_tool_use",
            "description": "This tool serves as a wrapper for utilizing multiple tools. Each tool that can be used must be specified in the tool sections.\nEnsure that the parameters provided to each tool are valid according to that tool's specification.\nUse this function to run multiple tools simultaneously, but only if they can operate in parallel.",
            "parameters": parameters_schema,
        },
    }


def merge_tool_calls(tool_calls: List[Dict]) -> Dict:
    """Collapse several parallel tool calls into one ``multi_tool_use`` call.

    Args:
        tool_calls: OpenAI-style tool calls, each carrying a ``function``
            entry with a ``name`` and a JSON-encoded ``arguments`` string.

    Returns:
        A single tool-call dict invoking ``multi_tool_use`` whose
        ``arguments`` string encodes every original call in ``tool_uses``.

    Raises:
        json.JSONDecodeError: if any call's ``arguments`` is not valid JSON.
    """
    # Note: annotations use typing.List/Dict for consistency with the rest of
    # this module (and compatibility with Python < 3.9, where list[dict] in a
    # signature raises at definition time).
    tool_uses = [
        {
            "recipient_name": tool_call["function"]["name"],
            # "arguments" arrives JSON-encoded; decode so it nests as an object.
            "parameters": json.loads(tool_call["function"]["arguments"]),
        }
        for tool_call in tool_calls
    ]
    return {
        "type": "function",
        "function": {
            "name": "multi_tool_use",
            # ensure_ascii=False keeps non-ASCII argument text readable.
            "arguments": json.dumps({"tool_uses": tool_uses}, ensure_ascii=False),
        },
    }


def convert_parallel_to_multi_tool_use_example(
    messages: List[Dict], tools: List[Dict]
) -> Tuple[List[Dict], List[Dict]]:
    """Rewrite a conversation so parallel tool calls go through ``multi_tool_use``.

    Appends the synthetic ``multi_tool_use`` tool definition to *tools* and
    replaces every assistant message carrying more than one tool call with an
    equivalent message holding a single merged ``multi_tool_use`` call.
    Messages with zero or one tool call pass through unchanged.

    Args:
        messages: conversation messages in OpenAI chat format.
        tools: available tool definitions.

    Returns:
        Tuple of (tools including ``multi_tool_use``, rewritten messages).
    """
    # Add the multi_tool_use tool definition alongside the real tools.
    all_tools = tools + [return_multi_tool_use()]
    merged_messages = []
    for message in messages:
        # tool_calls may be absent or explicitly None — normalize to a list.
        tool_calls = message.get("tool_calls", []) or []
        if len(tool_calls) > 1:
            # Only genuinely parallel calls (2+) are merged.
            merged_messages.append(
                {
                    "role": "assistant",
                    "content": message.get("content", None),
                    "tool_calls": [merge_tool_calls(tool_calls)],
                }
            )
        else:
            merged_messages.append(message)

    return all_tools, merged_messages


class MultiToolUseLlama31Template(Llama31Template):
    """Llama 3.1 template variant that funnels parallel tool calls through a
    single synthetic ``multi_tool_use`` wrapper function, and expands that
    wrapper back into individual tool calls when parsing model output."""

    version = "v3-llama3.1-multi-tool-use"

    def get_prompt_from_messages(
        self,
        messages: List[Dict],
        tools_or_functions: Optional[List[Dict]] = None,
        bos_token: Optional[str] = "",
        add_generation_prompt: bool = False,
    ) -> str:
        """This function is used to get the complete prompt for list of messages

        Args:
            messages (List[Dict]): List of messages
            tools_or_functions (Optional[List[Dict]], optional): List of tools or functions. Defaults to None.

        Returns:
            str: the prompt for inference/training
        """
        if tools_or_functions:
            # Merge any parallel tool calls into multi_tool_use and expose
            # the wrapper tool before delegating to the base template.
            final_tools, final_messages = convert_parallel_to_multi_tool_use_example(
                messages, tools_or_functions
            )
        else:
            final_tools, final_messages = [], messages
        return super().get_prompt_from_messages(
            final_messages, final_tools, bos_token, add_generation_prompt
        )

    def parse_assistant_response(
        self, llm_output: str, tool_choice: Any = None
    ) -> Dict:
        """Parse model output, expanding any multi_tool_use call into its
        constituent tool calls (each gets a fresh random id)."""
        parsed = super().parse_assistant_response(llm_output, tool_choice)
        expanded: List[Dict] = []
        for call in parsed.get("tool_calls", []) or []:
            if call["function"]["name"] != "multi_tool_use":
                expanded.append(call)
                continue
            # Unpack the wrapper: one entry per wrapped call in "tool_uses".
            uses = json.loads(call["function"]["arguments"])["tool_uses"]
            expanded.extend(
                {
                    "id": prompt_utils.get_random_tool_call_id(),
                    "type": "function",
                    "function": {
                        "name": use["recipient_name"],
                        "arguments": json.dumps(
                            use["parameters"], ensure_ascii=False
                        ),
                    },
                }
                for use in uses
            )
        return {
            "role": "assistant",
            "content": parsed.get("content", None),
            "tool_calls": expanded,
        }
84 changes: 84 additions & 0 deletions tests/prompt_test_v3-llama3.1-multi-tool-use.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
<|start_header_id|>system<|end_header_id|>

Environment: ipython

Cutting Knowledge Date: December 2023


You have access to the following functions:

Use the function 'get_car_price' to 'Get the price of a particular car model'
{"name": "get_car_price", "description": "Get the price of a particular car model", "parameters": {"type": "object", "properties": {"car_name": {"type": "string", "description": "The name of the car model"}}, "required": ["car_name"]}}

Use the function 'get_weather' to 'This function's purpose is to get the weather of a location'
{"name": "get_weather", "description": "This function's purpose is to get the weather of a location", "parameters": {"type": "object", "properties": {"location": {"type": "string", "description": "where to get weather"}}, "required": ["location"]}}

Use the function 'multi_tool_use' to 'This tool serves as a wrapper for utilizing multiple tools. Each tool that can be used must be specified in the tool sections.
Ensure that the parameters provided to each tool are valid according to that tool's specification.
Use this function to run multiple tools simultaneously, but only if they can operate in parallel.'
{"name": "multi_tool_use", "description": "This tool serves as a wrapper for utilizing multiple tools. Each tool that can be used must be specified in the tool sections.\nEnsure that the parameters provided to each tool are valid according to that tool's specification.\nUse this function to run multiple tools simultaneously, but only if they can operate in parallel.", "parameters": {"type": "object", "properties": {"tool_uses": {"type": "array", "description": "The tools to be executed in parallel. NOTE: only functions tools are permitted", "items": {"type": "object", "properties": {"recipient_name": {"type": "string", "description": "The name of the tool to use. The format should either be just the name of the tool, or in the format namespace.function_name for plugin and function tools."}, "parameters": {"type": "object", "additionalProperties": true, "description": "The parameters to pass to the tool. Ensure these are valid according to the tool's own specifications."}}, "required": ["recipient_name", "parameters"]}}}, "required": ["tool_uses"]}}


Think very carefully before calling functions.
If a you choose to call a function ONLY reply in the following format:
{start_tag}={function_name}>{parameters}{end_tag}
where

start_tag => `<function`
parameters => a JSON dict with the function argument name as key and function argument value as value.
end_tag => `</function>`

Here is an example,
<function=example_function_name>{"example_name": "example_value"}</function>

Reminder:
- Function calls MUST follow the specified format, start with <function= and end with </function>
- Required parameters MUST be specified
- Only call one function at a time
- Put the entire function call reply on one line

<|eot_id|><|start_header_id|>user<|end_header_id|>

who is the CEO of Meetkai<|eot_id|><|start_header_id|>assistant<|end_header_id|>

James Kaplan is the Co-Founder and CEO of MeetKai Inc.<|eot_id|><|start_header_id|>user<|end_header_id|>

is the car Song more expensive than car Tang?<|eot_id|><|start_header_id|>assistant<|end_header_id|>

I will get the price of 2 cars and compare<function=multi_tool_use>{"tool_uses": [{"recipient_name": "get_car_price", "parameters": {"car_name": "Song"}}, {"recipient_name": "get_car_price", "parameters": {"car_name": "Tang"}}]}</function><|eom_id|><|start_header_id|>ipython<|end_header_id|>

{"price": {"price": "$25000"}}<|eot_id|><|start_header_id|>ipython<|end_header_id|>

{"price": {"price": "$20000"}}<|eot_id|><|start_header_id|>assistant<|end_header_id|>

No, the car Tang is less expensive than the car Song. The car Song is priced at $25,000, while the car Tang is priced at $20,000.<|eot_id|><|start_header_id|>user<|end_header_id|>

what's the weather like in Hanoi?<|eot_id|><|start_header_id|>assistant<|end_header_id|>

<function=get_weather>{"location": "Hanoi"}</function><|eom_id|><|start_header_id|>ipython<|end_header_id|>

{"result": {"temperature": 10}}<|eot_id|><|start_header_id|>assistant<|end_header_id|>

The temperature in Hanoi is: 10 degree Celcious<|eot_id|><|start_header_id|>user<|end_header_id|>

Given the list of strings: '0','1','2','3','4','5' remove the number in the list that is close to 3.6 the most<|eot_id|><|start_header_id|>assistant<|end_header_id|>

I'll use code interpreter to handle this<|python_tag|>l=[int('0'),int('1'),int('2'),int('3'),int('4'),int('5')]
l.remove(3.6)<|eom_id|><|start_header_id|>ipython<|end_header_id|>

ValueError: list.remove(x): x not in list<|eot_id|><|start_header_id|>assistant<|end_header_id|>

I will fix the code<|python_tag|>l.remove(round(3.6))
l<|eom_id|><|start_header_id|>ipython<|end_header_id|>

[0,1,2,3,5,]<|eot_id|><|start_header_id|>assistant<|end_header_id|>

The final list is: 0,1,2,3,5<|eot_id|><|start_header_id|>user<|end_header_id|>

Thanks! What's the weather in San Francisco?<|eot_id|><|start_header_id|>assistant<|end_header_id|>

<function=get_weather>{"location": "San Francisco, CA"}</function><|eom_id|><|start_header_id|>ipython<|end_header_id|>

{"result": {"temperature": 20}}<|eot_id|><|start_header_id|>assistant<|end_header_id|>

The temperature in San Francisco is: 20 degree Celcious<|eot_id|>
1 change: 1 addition & 0 deletions tests/test_prompt_creation.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ def __init__(self, *args, **kwargs):
"v2.llama3": "meetkai/functionary-small-v2.5",
"v3.llama3": "meetkai/functionary-medium-v3.0",
"v3-llama3.1": "meetkai/functionary-small-v3.1",
"v3-llama3.1-multi-tool-use": "meetkai/functionary-small-v3.1",
}
self.image_template_version_to_model_name = {
"v3.llava_llama": "meetkai/functionary-vision-small-v0.1"
Expand Down
Loading