Fixed Phi 3, Fixed Function Call Result Message and changed some examples.
Maximilian-Winter committed May 12, 2024
1 parent 926fb91 commit da00fa8
Showing 6 changed files with 29 additions and 21 deletions.
8 changes: 4 additions & 4 deletions examples/02_Structured_Output/output_knowledge_graph.py
@@ -6,9 +6,9 @@
from llama_cpp_agent import LlamaCppAgent
from llama_cpp_agent.llm_output_settings import LlmStructuredOutputSettings, LlmStructuredOutputType
from llama_cpp_agent import MessagesFormatterType
-from llama_cpp_agent.providers import TGIServerProvider
+from llama_cpp_agent.providers import LlamaCppServerProvider

-provider = TGIServerProvider("http://localhost:8080")
+provider = LlamaCppServerProvider("http://localhost:8080")


class Node(BaseModel):
@@ -54,7 +54,7 @@ def visualize_knowledge_graph(kg):


def generate_graph(user_input: str):
prompt = f"""Help me understand the following by describing it as a extremely detailed knowledge graph with at least 20 nodes: {user_input}""".strip()
prompt = f"""Help me understand the following by describing it as a extremely detailed knowledge graph with at least 40 nodes: {user_input}""".strip()
    response = agent.get_chat_response(
        message=prompt,
        structured_output_settings=output_settings
@@ -63,5 +63,5 @@ def generate_graph(user_input: str):
    return response


graph = generate_graph("Teach me about quantum mechanics")
graph = generate_graph("large language models.")
visualize_knowledge_graph(graph)
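
Note on the change above: the example now targets a llama.cpp HTTP server instead of a text-generation-inference endpoint, and asks for a larger graph. For orientation, the collapsed hunks of this file define the pydantic graph models and build the structured-output settings and agent used by generate_graph. A rough sketch of that elided setup, assuming llama-cpp-agent's from_pydantic_models constructor; the Edge fields and the system prompt are illustrative, not the verbatim file contents:

# Sketch (assumed, not verbatim) of the setup this diff elides.
from typing import List
from pydantic import BaseModel, Field

class Edge(BaseModel):  # counterpart to the Node model shown above; fields assumed
    source: int
    target: int
    label: str

class KnowledgeGraph(BaseModel):
    nodes: List[Node] = Field(..., description="Nodes of the knowledge graph")
    edges: List[Edge] = Field(..., description="Edges of the knowledge graph")

# Constrain generation so the model must emit a KnowledgeGraph instance.
output_settings = LlmStructuredOutputSettings.from_pydantic_models(
    [KnowledgeGraph], LlmStructuredOutputType.object_instance
)

agent = LlamaCppAgent(
    provider,  # the LlamaCppServerProvider created above
    system_prompt="You are an advanced AI assistant creating knowledge graphs.",  # illustrative
    predefined_messages_formatter_type=MessagesFormatterType.CHATML,
)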
18 changes: 13 additions & 5 deletions examples/03_Tools_And_Function_Calling/function_calling_agent.py
@@ -8,9 +8,9 @@
from llama_cpp_agent import LlamaCppFunctionTool
from llama_cpp_agent import FunctionCallingAgent
from llama_cpp_agent import MessagesFormatterType
-from llama_cpp_agent.providers import TGIServerProvider
+from llama_cpp_agent.providers import LlamaCppServerProvider

-model = TGIServerProvider("http://localhost:8080")
+model = LlamaCppServerProvider("http://localhost:8080")


# Simple tool for the agent, to get the current date and time in a specific format.
@@ -93,7 +93,7 @@ def get_current_weather(location, unit):

# Callback for receiving messages for the user.
def send_message_to_user_callback(message: str):
-    print(message)
+    print("Assistant: " + message.strip())

# First we create the calculator tool.
calculator_function_tool = LlamaCppFunctionTool(calculator)
@@ -110,8 +110,16 @@ def send_message_to_user_callback(message: str):
    llama_cpp_function_tools=[calculator_function_tool, current_datetime_function_tool, get_weather_function_tool],
    send_message_to_user_callback=send_message_to_user_callback,
    allow_parallel_function_calling=True,
+    debug_output=False,
    messages_formatter_type=MessagesFormatterType.CHATML)

user_input = "What is the current weather in London celsius?"
function_call_agent.generate_response(user_input)
user_input = '''Get the date and time in '%d-%m-%Y %H:%M' format. Get the current weather in celsius in London, New York and at the North Pole. Solve the following calculations: 42 * 42, 74 + 26, 7 * 26, 4 + 6 and 96/8.'''
print("User: " + user_input)

settings = model.get_provider_default_settings()
settings.add_additional_stop_sequences(["<|end|>"])
settings.stream = False
settings.temperature = 0.65

function_call_agent.generate_response(user_input, llm_sampling_settings=settings)
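
The added settings block is worth calling out: it is the per-call sampling pattern used throughout llama-cpp-agent, where you start from the provider's default sampler settings, adjust fields, and pass the result into the generation call. A minimal sketch of the same pattern on a plain chat agent; MessagesFormatterType.PHI_3 and the get_chat_response keyword are assumptions carried over from other examples in the repository:

# Minimal sketch (assumed API) of the per-call sampler-settings pattern above.
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppServerProvider

provider = LlamaCppServerProvider("http://localhost:8080")
agent = LlamaCppAgent(
    provider,
    system_prompt="You are a helpful assistant.",  # illustrative
    predefined_messages_formatter_type=MessagesFormatterType.PHI_3,  # assumed member
)

settings = provider.get_provider_default_settings()
settings.add_additional_stop_sequences(["<|end|>"])  # Phi-3 end-of-turn token
settings.stream = False      # return the full completion at once
settings.temperature = 0.65  # moderately creative sampling

print(agent.get_chat_response("Hello!", llm_sampling_settings=settings))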

4 changes: 2 additions & 2 deletions examples/04_Chains/math_operation_greeting.py
@@ -5,9 +5,9 @@
from llama_cpp_agent import AgentChainElement, AgentChain
from llama_cpp_agent import LlamaCppAgent
from llama_cpp_agent import MessagesFormatterType
-from llama_cpp_agent.providers import LlamaCppServerProvider
+from llama_cpp_agent.providers import TGIServerProvider

-model = LlamaCppServerProvider("http://127.0.0.1:8080")
+model = TGIServerProvider("http://127.0.0.1:8080")

agent = LlamaCppAgent(
    model,
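For readers comparing backends: TGIServerProvider talks to a Hugging Face text-generation-inference server, while LlamaCppServerProvider talks to a llama.cpp HTTP server. Both are constructed from a base URL, so switching an example between them is a one-line change, as this diff and the ones above show:

# Both providers wrap an HTTP endpoint; the URLs below are placeholders.
from llama_cpp_agent.providers import LlamaCppServerProvider, TGIServerProvider

llama_cpp_model = LlamaCppServerProvider("http://127.0.0.1:8080")  # llama.cpp server
tgi_model = TGIServerProvider("http://127.0.0.1:8080")             # text-generation-inference
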
2 changes: 1 addition & 1 deletion examples/05_Rag/example_synthetic_diamonds_bars.py
@@ -39,7 +39,7 @@
rag.add_document(split)

# Define a llamacpp server endpoint.
-from llama_cpp_agent.providers.llama_cpp_server import LlamaCppServerProvider
+from llama_cpp_agent.providers import LlamaCppServerProvider

model = LlamaCppServerProvider("http://127.0.0.1:8080")

2 changes: 1 addition & 1 deletion src/llama_cpp_agent/chat_history/messages.py
@@ -95,7 +95,7 @@ def convert_messages_to_list_of_dictionaries(
        else:
            content = f"Function Call:\nFunction: {message.tool_calls[0].function.name}\nArguments: {message.tool_calls[0].function.arguments}\n"
    elif isinstance(message, ToolMessage):
-        content = f"Function Call Result:\nResult: {message.content}\n"
+        content = f"{message.content}\n"
    else:
        content = f"{message.content}"
    # Construct the dictionary for the current message
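The practical effect of this one-line change: tool results now enter the flattened chat history verbatim instead of being wrapped in a "Function Call Result:" header, matching the commit title. A standalone illustration of the before and after rendering (not library code):

# Standalone illustration (not library code) of the rendering change above.
tool_result = "42.5"

old_content = f"Function Call Result:\nResult: {tool_result}\n"  # before this commit
new_content = f"{tool_result}\n"                                 # after this commit

print(repr(old_content))  # 'Function Call Result:\nResult: 42.5\n'
print(repr(new_content))  # '42.5\n'
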
16 changes: 8 additions & 8 deletions src/llama_cpp_agent/messages_formatter.py
@@ -127,12 +127,12 @@
FUNCTION_PROMPT_END_OPEN_CHAT = """"""
DEFAULT_OPEN_CHAT_STOP_SEQUENCES = ["<|end_of_turn|>"]

-SYS_PROMPT_START_PHI_3 = """"""
-SYS_PROMPT_END_PHI_3 = """\n\n"""
-USER_PROMPT_START_PHI_3 = """<|user|>"""
-USER_PROMPT_END_PHI_3 = """<|end|>\n"""
-ASSISTANT_PROMPT_START_PHI_3 = """<|assistant|>"""
-ASSISTANT_PROMPT_END_PHI_3 = """<|end|>\n"""
+SYS_PROMPT_START_PHI_3 = """<|system|>\n"""
+SYS_PROMPT_END_PHI_3 = """<|endoftext|>\n"""
+USER_PROMPT_START_PHI_3 = """<|user|>\n"""
+USER_PROMPT_END_PHI_3 = """<|endoftext|>\n"""
+ASSISTANT_PROMPT_START_PHI_3 = """<|assistant|>\n"""
+ASSISTANT_PROMPT_END_PHI_3 = """<|endoftext|>\n"""
FUNCTION_PROMPT_START_PHI_3 = """"""
FUNCTION_PROMPT_END_PHI_3 = """"""
DEFAULT_PHI_3_STOP_SEQUENCES = ["<|end|>", "<|end_of_turn|>"]
@@ -546,11 +546,11 @@ def as_dict(self) -> dict:
    USER_PROMPT_END_PHI_3,
    ASSISTANT_PROMPT_START_PHI_3,
    ASSISTANT_PROMPT_END_PHI_3,
-    True,
+    False,
    DEFAULT_PHI_3_STOP_SEQUENCES,
    True,
    FUNCTION_PROMPT_START_PHI_3,
-    FUNCTION_PROMPT_END_PHI_3,
+    FUNCTION_PROMPT_END_PHI_3
)

predefined_formatter = {
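Concretely, the reworked Phi-3 template puts each role tag on its own line and terminates every turn with <|endoftext|> in the rendered prompt, while <|end|> remains among the stop sequences. Concatenating the new constants around a short exchange (a standalone illustration, not library code) gives:

# Standalone illustration (not library code): one exchange rendered with the
# new Phi-3 constants from this commit.
SYS_START, SYS_END = "<|system|>\n", "<|endoftext|>\n"
USER_START, USER_END = "<|user|>\n", "<|endoftext|>\n"
ASSISTANT_START = "<|assistant|>\n"

prompt = (
    SYS_START + "You are a helpful assistant." + SYS_END
    + USER_START + "Hello!" + USER_END
    + ASSISTANT_START  # the model generates from here until a stop sequence
)
print(prompt)
# <|system|>
# You are a helpful assistant.<|endoftext|>
# <|user|>
# Hello!<|endoftext|>
# <|assistant|>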
