diff --git a/examples/02_Structured_Output/output_knowledge_graph.py b/examples/02_Structured_Output/output_knowledge_graph.py
index 1063630..c5fafbf 100644
--- a/examples/02_Structured_Output/output_knowledge_graph.py
+++ b/examples/02_Structured_Output/output_knowledge_graph.py
@@ -6,9 +6,9 @@ from llama_cpp_agent import LlamaCppAgent
 from llama_cpp_agent.llm_output_settings import LlmStructuredOutputSettings, LlmStructuredOutputType
 from llama_cpp_agent import MessagesFormatterType
-from llama_cpp_agent.providers import TGIServerProvider
+from llama_cpp_agent.providers import LlamaCppServerProvider
 
-provider = TGIServerProvider("http://localhost:8080")
+provider = LlamaCppServerProvider("http://localhost:8080")
 
 
 class Node(BaseModel):
@@ -54,7 +54,7 @@ def visualize_knowledge_graph(kg):
 
 
 def generate_graph(user_input: str):
-    prompt = f"""Help me understand the following by describing it as a extremely detailed knowledge graph with at least 20 nodes: {user_input}""".strip()
+    prompt = f"""Help me understand the following by describing it as an extremely detailed knowledge graph with at least 40 nodes: {user_input}""".strip()
     response = agent.get_chat_response(
         message=prompt, structured_output_settings=output_settings
@@ -63,5 +63,5 @@ def generate_graph(user_input: str):
     return response
 
 
-graph = generate_graph("Teach me about quantum mechanics")
+graph = generate_graph("large language models.")
 visualize_knowledge_graph(graph)
diff --git a/examples/03_Tools_And_Function_Calling/function_calling_agent.py b/examples/03_Tools_And_Function_Calling/function_calling_agent.py
index 72dbaca..8fb6650 100644
--- a/examples/03_Tools_And_Function_Calling/function_calling_agent.py
+++ b/examples/03_Tools_And_Function_Calling/function_calling_agent.py
@@ -8,9 +8,9 @@ from llama_cpp_agent import LlamaCppFunctionTool
 from llama_cpp_agent import FunctionCallingAgent
 from llama_cpp_agent import MessagesFormatterType
-from llama_cpp_agent.providers import TGIServerProvider
+from llama_cpp_agent.providers import LlamaCppServerProvider
 
-model = TGIServerProvider("http://localhost:8080")
+model = LlamaCppServerProvider("http://localhost:8080")
 
 
 # Simple tool for the agent, to get the current date and time in a specific format.
@@ -93,7 +93,7 @@ def get_current_weather(location, unit):
 # Callback for receiving messages for the user.
 def send_message_to_user_callback(message: str):
-    print(message)
+    print("Assistant: " + message.strip())
 
 # First we create the calculator tool.
 calculator_function_tool = LlamaCppFunctionTool(calculator)
@@ -110,8 +110,16 @@ def send_message_to_user_callback(message: str):
     llama_cpp_function_tools=[calculator_function_tool, current_datetime_function_tool, get_weather_function_tool],
     send_message_to_user_callback=send_message_to_user_callback,
     allow_parallel_function_calling=True,
+    debug_output=False,
     messages_formatter_type=MessagesFormatterType.CHATML)
 
-user_input = "What is the current weather in London celsius?"
-function_call_agent.generate_response(user_input)
+user_input = '''Get the date and time in '%d-%m-%Y %H:%M' format. Get the current weather in celsius in London, New York and at the North Pole.
+Solve the following calculations: 42 * 42, 74 + 26, 7 * 26, 4 + 6 and 96/8.'''
+print("User: " + user_input)
+
+settings = model.get_provider_default_settings()
+settings.add_additional_stop_sequences(["<|end|>"])
+settings.stream = False
+settings.temperature = 0.65
+
+function_call_agent.generate_response(user_input, llm_sampling_settings=settings)
diff --git a/examples/04_Chains/math_operation_greeting.py b/examples/04_Chains/math_operation_greeting.py
index a333851..23d417d 100644
--- a/examples/04_Chains/math_operation_greeting.py
+++ b/examples/04_Chains/math_operation_greeting.py
@@ -5,9 +5,9 @@ from llama_cpp_agent import AgentChainElement, AgentChain
 from llama_cpp_agent import LlamaCppAgent
 from llama_cpp_agent import MessagesFormatterType
-from llama_cpp_agent.providers import LlamaCppServerProvider
+from llama_cpp_agent.providers import TGIServerProvider
 
-model = LlamaCppServerProvider("http://127.0.0.1:8080")
+model = TGIServerProvider("http://127.0.0.1:8080")
 
 agent = LlamaCppAgent(
     model,
diff --git a/examples/05_Rag/example_synthetic_diamonds_bars.py b/examples/05_Rag/example_synthetic_diamonds_bars.py
index d2fd232..c7511f7 100644
--- a/examples/05_Rag/example_synthetic_diamonds_bars.py
+++ b/examples/05_Rag/example_synthetic_diamonds_bars.py
@@ -39,7 +39,7 @@ rag.add_document(split)
 
 # Define a llamacpp server endpoint.
-from llama_cpp_agent.providers.llama_cpp_server import LlamaCppServerProvider
+from llama_cpp_agent.providers import LlamaCppServerProvider
 
 model = LlamaCppServerProvider("http://127.0.0.1:8080")
diff --git a/src/llama_cpp_agent/chat_history/messages.py b/src/llama_cpp_agent/chat_history/messages.py
index 03577eb..67a4d71 100644
--- a/src/llama_cpp_agent/chat_history/messages.py
+++ b/src/llama_cpp_agent/chat_history/messages.py
@@ -95,7 +95,7 @@ def convert_messages_to_list_of_dictionaries(
         else:
             content = f"Function Call:\nFunction: {message.tool_calls[0].function.name}\nArguments: {message.tool_calls[0].function.arguments}\n"
     elif isinstance(message, ToolMessage):
-        content = f"Function Call Result:\nResult: {message.content}\n"
+        content = f"{message.content}\n"
     else:
         content = f"{message.content}"
     # Construct the dictionary for the current message
diff --git a/src/llama_cpp_agent/messages_formatter.py b/src/llama_cpp_agent/messages_formatter.py
index 7e08e5f..e83e9fa 100644
--- a/src/llama_cpp_agent/messages_formatter.py
+++ b/src/llama_cpp_agent/messages_formatter.py
@@ -127,12 +127,12 @@ FUNCTION_PROMPT_END_OPEN_CHAT = """"""
 DEFAULT_OPEN_CHAT_STOP_SEQUENCES = ["<|end_of_turn|>"]
 
-SYS_PROMPT_START_PHI_3 = """"""
-SYS_PROMPT_END_PHI_3 = """\n\n"""
-USER_PROMPT_START_PHI_3 = """<|user|>"""
-USER_PROMPT_END_PHI_3 = """<|end|>\n"""
-ASSISTANT_PROMPT_START_PHI_3 = """<|assistant|>"""
-ASSISTANT_PROMPT_END_PHI_3 = """<|end|>\n"""
+SYS_PROMPT_START_PHI_3 = """<|system|>\n"""
+SYS_PROMPT_END_PHI_3 = """<|endoftext|>\n"""
+USER_PROMPT_START_PHI_3 = """<|user|>\n"""
+USER_PROMPT_END_PHI_3 = """<|endoftext|>\n"""
+ASSISTANT_PROMPT_START_PHI_3 = """<|assistant|>\n"""
+ASSISTANT_PROMPT_END_PHI_3 = """<|endoftext|>\n"""
 FUNCTION_PROMPT_START_PHI_3 = """"""
 FUNCTION_PROMPT_END_PHI_3 = """"""
 DEFAULT_PHI_3_STOP_SEQUENCES = ["<|end|>", "<|end_of_turn|>"]
@@ -546,11 +546,11 @@ def as_dict(self) -> dict:
     USER_PROMPT_END_PHI_3,
     ASSISTANT_PROMPT_START_PHI_3,
     ASSISTANT_PROMPT_END_PHI_3,
-    True,
+    False,
     DEFAULT_PHI_3_STOP_SEQUENCES,
     True,
     FUNCTION_PROMPT_START_PHI_3,
-    FUNCTION_PROMPT_END_PHI_3,
+    FUNCTION_PROMPT_END_PHI_3
 )
 
 predefined_formatter = {
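
For readers checking the Phi-3 template change in src/llama_cpp_agent/messages_formatter.py, the sketch below shows the prompt text the new token constants would produce. It is a minimal illustration only, assuming the formatter simply concatenates each turn's start token, content, and end token; the render helper is hypothetical and not part of the llama_cpp_agent API.

# Minimal sketch of the updated PHI_3 prompt layout.
# Assumption: start token + content + end token per turn; `render` is a
# hypothetical helper for illustration, not library code.
SYS_PROMPT_START_PHI_3 = "<|system|>\n"
SYS_PROMPT_END_PHI_3 = "<|endoftext|>\n"
USER_PROMPT_START_PHI_3 = "<|user|>\n"
USER_PROMPT_END_PHI_3 = "<|endoftext|>\n"
ASSISTANT_PROMPT_START_PHI_3 = "<|assistant|>\n"

def render(system: str, user: str) -> str:
    # Close the system and user turns, then open the assistant turn
    # so the model generates from there.
    return (
        SYS_PROMPT_START_PHI_3 + system + SYS_PROMPT_END_PHI_3
        + USER_PROMPT_START_PHI_3 + user + USER_PROMPT_END_PHI_3
        + ASSISTANT_PROMPT_START_PHI_3
    )

print(render("You are a helpful assistant.", "What is 42 * 42?"))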