
Commit

Fixed a lot of bugs and improved the grammar generator.
Maximilian-Winter committed Jan 12, 2024
1 parent aa56812 commit cf1f03e
Showing 21 changed files with 274 additions and 210 deletions.
2 changes: 1 addition & 1 deletion examples/example_agent.py
@@ -11,7 +11,7 @@
from llama_cpp_agent.function_calling import LlamaCppFunctionTool

function_tools = [LlamaCppFunctionTool(GetFileList), LlamaCppFunctionTool(ReadTextFile),
- LlamaCppFunctionTool(WriteTextFile, has_triple_quoted_string=True)]
+ LlamaCppFunctionTool(WriteTextFile, has_triple_quoted_string=True, triple_quoted_string_field_name="file_content")]

function_tool_registry = LlamaCppAgent.get_function_tool_registry(function_tools)

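The tool registration must now name the model field that receives the unconstrained output explicitly. A minimal sketch of the updated wiring, assuming the WriteTextFile model and agent imports from this example:

# Hedged sketch of the new tool-side wiring: triple_quoted_string_field_name
# tells the registry which model field the free-form text is parsed back into.
from llama_cpp_agent.llm_agent import LlamaCppAgent
from llama_cpp_agent.function_calling import LlamaCppFunctionTool

write_file_tool = LlamaCppFunctionTool(
    WriteTextFile,                                   # assumed importable as in this example
    has_triple_quoted_string=True,
    triple_quoted_string_field_name="file_content",  # must match the model's field name
)
function_tool_registry = LlamaCppAgent.get_function_tool_registry([write_file_tool])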
2 changes: 1 addition & 1 deletion examples/example_agent_auto_coder.py
@@ -11,7 +11,7 @@
from llama_cpp_agent.function_calling import LlamaCppFunctionTool

function_tools = [LlamaCppFunctionTool(SendMessageToUser), LlamaCppFunctionTool(GetFileList), LlamaCppFunctionTool(ReadTextFile),
- LlamaCppFunctionTool(WriteTextFile, has_triple_quoted_string=True)]
+ LlamaCppFunctionTool(WriteTextFile, has_triple_quoted_string=True, triple_quoted_string_field_name="file_content")]

function_tool_registry = LlamaCppAgent.get_function_tool_registry(function_tools)

8 changes: 4 additions & 4 deletions examples/example_agent_models_auto_coder.py
@@ -8,7 +8,6 @@
base_folder = "dev"



def agent_dev_folder_setup(custom_base_folder=None):
global base_folder
base_folder = custom_base_folder
@@ -46,8 +45,9 @@ class WriteTextFile(BaseModel):

# Allow free output for the File Content to Enhance LLM Output

- triple_quoted_string: str = Field(...,
-                                   description="Triple quoted string for unconstrained output.")
+ file_content: str = Field(...,
+                           description="Triple quoted string for unconstrained output.", triple_quoted_string=True)

def run(self):

if self.directory == "":
@@ -84,7 +84,7 @@ def run(self):

# Write back to file
with open(file_path, write_mode, encoding="utf-8") as file:
- file.writelines(self.triple_quoted_string)
+ file.writelines(self.file_content)

return f"Content written to '{self.filename_without_extension}{self.filename_extension}'."

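This is the model-side counterpart of the tool change above: the free-form field is now ordinarily named and flagged through a Field extra. A sketch derived from the diff, with the other fields of the model omitted:

# Sketch of the renamed field (derived from the diff): the triple_quoted_string
# extra marks which field is emitted outside the constrained JSON grammar.
from pydantic import BaseModel, Field

class WriteTextFile(BaseModel):
    # directory/filename fields of the real model omitted for brevity
    file_content: str = Field(...,
                              description="Triple quoted string for unconstrained output.",
                              triple_quoted_string=True)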
8 changes: 3 additions & 5 deletions examples/example_book_dataset_creation.py
@@ -4,14 +4,14 @@
from pydantic import BaseModel, Field

from llama_cpp_agent.llm_agent import LlamaCppAgent
- from llama_cpp_agent.gbnf_grammar_generator.gbnf_grammar_from_pydantic_models import generate_gbnf_grammar_and_documentation
+ from llama_cpp_agent.gbnf_grammar_generator.gbnf_grammar_from_pydantic_models import \
+     generate_gbnf_grammar_and_documentation
from llama_cpp_agent.providers.llama_cpp_server_provider import LlamaCppServerLLMSettings

main_model = LlamaCppServerLLMSettings(
completions_endpoint_url="http://127.0.0.1:8080/completion"
)


text = """The Feynman Lectures on Physics is a physics textbook based on some lectures by Richard Feynman, a Nobel laureate who has sometimes been called "The Great Explainer". The lectures were presented before undergraduate students at the California Institute of Technology (Caltech), during 1961–1963. The book's co-authors are Feynman, Robert B. Leighton, and Matthew Sands."""


@@ -38,8 +38,6 @@ class Book(BaseModel):
gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation([Book])

llama_cpp_agent = LlamaCppAgent(main_model, debug_output=True,
- system_prompt="You are an advanced AI, tasked to create JSON database entries for books.\n\n\n" + documentation)
+ system_prompt="You are an advanced AI, tasked to create JSON database entries for books.\n\n\n" + documentation)

print(llama_cpp_agent.get_chat_response(text, temperature=0.15, grammar=gbnf_grammar))

4 changes: 2 additions & 2 deletions examples/example_dataframe_creation.py
@@ -62,7 +62,7 @@ class Database(BaseModel):
)


- gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation([Database], False,
+ gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation([Database],
model_prefix="Response Model",
fields_prefix="Response Model Field")

@@ -73,7 +73,7 @@ class Database(BaseModel):

def dataframe(data: str) -> Database:
prompt = data
- response = llama_cpp_agent.get_chat_response(message=prompt, temperature=0.25, grammar=gbnf_grammar)
+ response = llama_cpp_agent.get_chat_response(message=prompt, temperature=0.65, grammar=gbnf_grammar)

database = extract_object_from_response(response, Database)
return database
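As this file and several others show, generate_gbnf_grammar_and_documentation no longer takes the leading boolean that toggled field-string handling; that behavior is now configured per tool. A sketch of the trimmed call, assuming the Database model above:

# Sketch of the simplified signature: only the model list and the optional
# prefix keywords remain at this call site.
gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation(
    [Database],
    model_prefix="Response Model",
    fields_prefix="Response Model Field",
)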
4 changes: 3 additions & 1 deletion examples/example_embodied_function_calling.py
@@ -10,7 +10,8 @@

function_tools = [LlamaCppFunctionTool(SendMessageToUser), LlamaCppFunctionTool(GetFileList),
LlamaCppFunctionTool(ReadTextFile),
- LlamaCppFunctionTool(WriteTextFile, has_triple_quoted_string=True)]
+ LlamaCppFunctionTool(WriteTextFile, has_triple_quoted_string=True,
+                      triple_quoted_string_field_name="file_content")]

function_tool_registry = LlamaCppAgent.get_function_tool_registry(function_tools)

@@ -42,6 +43,7 @@
system_prompt=system_prompt,
predefined_messages_formatter_type=MessagesFormatterType.CHATML)

+ print(function_tool_registry.gbnf_grammar)
response = llama_cpp_agent.get_chat_response(
'Write a engaging rap song about the drug problem in the USA in the "USARap.txt" file under "./".',
temperature=0.75, function_tool_registry=function_tool_registry)
2 changes: 1 addition & 1 deletion examples/example_function_calling_agent.py
@@ -132,7 +132,7 @@ def send_message_to_user_callback(message: str):
pydantic_functions=[Calculator],
# Callback for receiving messages for the user.
send_message_to_user_callback=send_message_to_user_callback, debug_output=True)

+ print(function_call_agent.tool_registry.gbnf_grammar)
while True:
user_input = input(">")
function_call_agent.generate_response(user_input)
9 changes: 4 additions & 5 deletions examples/example_knowledge_graph.py
@@ -46,16 +46,15 @@ class KnowledgeGraph(BaseModel):
edges: List[Edge] = Field(..., default_factory=list)


- gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation([KnowledgeGraph], False,
+ gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation([KnowledgeGraph],
model_prefix="Response Model",
fields_prefix="Response Model Field")

print(gbnf_grammar)
grammar = LlamaGrammar.from_string(gbnf_grammar, verbose=True)

llama_cpp_agent = LlamaCppAgent(main_model, debug_output=True,
- system_prompt="You are an advanced AI assistant responding in JSON format.\n\nAvailable JSON response models:\n\n" + documentation,
- predefined_messages_formatter_type=MessagesFormatterType.CHATML)
+ system_prompt="You are an advanced AI assistant responding in JSON format.\n\nAvailable JSON response models:\n\n" + documentation,
+ predefined_messages_formatter_type=MessagesFormatterType.CHATML)

from graphviz import Digraph

@@ -78,7 +77,7 @@ def visualize_knowledge_graph(kg: KnowledgeGraph):
def generate_graph(user_input: str) -> KnowledgeGraph:
prompt = f'''Help me understand the following by describing it as a extremely detailed knowledge graph with at least 20 nodes: {user_input}'''.strip()
response = llama_cpp_agent.get_chat_response(message=prompt, temperature=0.65, mirostat_mode=0, mirostat_tau=5.0,
- mirostat_eta=0.1, grammar=grammar)
+ mirostat_eta=0.1, grammar=gbnf_grammar)
knowledge_graph = json.loads(response)
cls = KnowledgeGraph
knowledge_graph = cls(**knowledge_graph)
4 changes: 2 additions & 2 deletions examples/example_manual_function_calling.py
@@ -16,8 +16,8 @@ def calculate_a_to_the_power_b(a: Union[int | float], b: Union[int | float]):

DynamicSampleModel = create_dynamic_model_from_function(calculate_a_to_the_power_b)

- grammar, documentation = generate_gbnf_grammar_and_documentation([DynamicSampleModel], root_rule_class="function",
-                                                                  root_rule_content="params")
+ grammar, documentation = generate_gbnf_grammar_and_documentation([DynamicSampleModel], outer_object_name="function",
+                                                                  outer_object_content="params")

main_model = LlamaCppServerLLMSettings(
completions_endpoint_url="http://127.0.0.1:8080/completion"
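The wrapper-object keywords were renamed from root_rule_class/root_rule_content to outer_object_name/outer_object_content. A sketch of a call with the new names, assuming the dynamic model above:

# Sketch with the renamed keywords: the generated grammar wraps each call in
# an outer JSON object keyed by "function", with its arguments under "params".
grammar, documentation = generate_gbnf_grammar_and_documentation(
    [DynamicSampleModel],
    outer_object_name="function",
    outer_object_content="params",
)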
13 changes: 5 additions & 8 deletions examples/example_multi_file_generation.py
@@ -3,7 +3,6 @@

import json


from llama_cpp_agent.messages_formatter import MessagesFormatterType
from llama_cpp import Llama, LlamaGrammar
from pydantic import BaseModel
@@ -29,8 +28,6 @@
)




class File(BaseModel):
"""
Correctly named file with contents.
@@ -56,20 +53,20 @@ class Program(BaseModel):
files: List[File] = Field(..., description="List of files")


- gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation(pydantic_model_list=[Program], look_markdown_code_block=False)
+ gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation(pydantic_model_list=[Program])

print(gbnf_grammar)
grammar = LlamaGrammar.from_string(gbnf_grammar, verbose=True)


llama_cpp_agent = LlamaCppAgent(main_model, debug_output=True,
- system_prompt="You are a world class programming AI capable of writing correct python scripts and modules. You will name files correct, include __init__.py files and write correct python code with correct imports.\n\nYou are responding in JSON format.\n\nAvailable JSON response models:\n\n" + documentation.strip() + "\n\nAlways provide full implementation to the user!!!!",
- predefined_messages_formatter_type=MessagesFormatterType.MIXTRAL)
+ system_prompt="You are a world class programming AI capable of writing correct python scripts and modules. You will name files correct, include __init__.py files and write correct python code with correct imports.\n\nYou are responding in JSON format.\n\nAvailable JSON response models:\n\n" + documentation.strip() + "\n\nAlways provide full implementation to the user!!!!",
+ predefined_messages_formatter_type=MessagesFormatterType.MIXTRAL)


def develop(data: str) -> Program:
prompt = data
response = llama_cpp_agent.get_chat_response(message=prompt, temperature=0.35, mirostat_mode=2, mirostat_tau=4.0,
- mirostat_eta=0.1, grammar=grammar)
+ mirostat_eta=0.1, grammar=gbnf_grammar)
json_obj = json.loads(response)
cls = Program
ai_program = cls(**json_obj)
7 changes: 4 additions & 3 deletions examples/example_simple_function_calling.py
@@ -7,7 +7,8 @@

from llama_cpp_agent.messages_formatter import MessagesFormatterType
from llama_cpp_agent.function_calling import LlamaCppFunctionTool
- from llama_cpp_agent.providers.openai_endpoint_provider import OpenAIEndpointSettings
+ from llama_cpp_agent.providers.llama_cpp_server_provider import LlamaCppServerLLMSettings



# Simple calculator tool for the agent that can add, subtract, multiply, and divide.
@@ -43,8 +44,8 @@ def run(self):

function_tool_registry = LlamaCppAgent.get_function_tool_registry(function_tools)

- main_model = OpenAIEndpointSettings(
-     "http://localhost:8080/v1/completions"
+ main_model = LlamaCppServerLLMSettings(
+     "http://localhost:8080/completion"
)
llama_cpp_agent = LlamaCppAgent(main_model, debug_output=False,
system_prompt="You are an advanced AI, tasked to assist the user by calling functions in JSON format.\n\n\n" + function_tool_registry.get_documentation(),
6 changes: 3 additions & 3 deletions examples/example_two_agent_discussion.py
@@ -1,6 +1,6 @@
from llama_cpp import Llama

- from llama_cpp_agent.llm_prompt_template import PromptTemplateFields, Prompter
+ from llama_cpp_agent.llm_prompt_template import PromptTemplateFields, PromptTemplate
from llama_cpp_agent.llm_agent import LlamaCppAgent
from llama_cpp_agent.messages_formatter import MessagesFormatterType

@@ -38,11 +38,11 @@
template_fields.add_field("user_role_name", user_role_name)

assistant_system_prompt_template = """You are John Smith, a {assistant_role_name}, you are collaborating with Richard Steen, an expert {user_role_name}."""
- assistant_system_prompter = Prompter.from_string(assistant_system_prompt_template)
+ assistant_system_prompter = PromptTemplate.from_string(assistant_system_prompt_template)
assistant_system_prompt = assistant_system_prompter.generate_prompt(template_fields.get_fields_dict())

user_system_prompt_template = """You are Richard Steen, a {user_role_name}, you are collaborating with Richard Steen, an expert {assistant_role_name}."""
- user_system_prompter = Prompter.from_string(user_system_prompt_template)
+ user_system_prompter = PromptTemplate.from_string(user_system_prompt_template)
user_system_prompt = user_system_prompter.generate_prompt(template_fields.get_fields_dict())

agent_assistant = LlamaCppAgent(model=main_model, name="John Smith", system_prompt=assistant_system_prompt,
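The prompt-template class was renamed from Prompter to PromptTemplate; the methods used in this example are unchanged. A self-contained sketch based only on the calls visible above, with example role values assumed:

# Sketch of the renamed class, using only calls that appear in this example.
from llama_cpp_agent.llm_prompt_template import PromptTemplateFields, PromptTemplate

template_fields = PromptTemplateFields()
template_fields.add_field("assistant_role_name", "Biologist")  # example values, assumed
template_fields.add_field("user_role_name", "Physicist")

prompter = PromptTemplate.from_string(
    "You are John Smith, a {assistant_role_name}, collaborating with an expert {user_role_name}.")
system_prompt = prompter.generate_prompt(template_fields.get_fields_dict())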
56 changes: 56 additions & 0 deletions examples/function_calling_agent.json
@@ -0,0 +1,56 @@
{
"llama_generation_settings": {
"max_tokens": 0,
"temperature": 0.65,
"top_k": 0,
"top_p": 0.5,
"min_p": 0.05,
"typical_p": 1.0,
"repeat_penalty": 1.0,
"mirostat_mode": 0,
"mirostat_tau": 5.0,
"mirostat_eta": 0.1,
"tfs_z": 0.975,
"stop_sequences": null,
"stream": true,
"print_output": true
},
"system_prompt": "You are an advanced AI assistant. You are interacting with your environment and the user by calling functions. You call functions by writing JSON objects, which represent specific function calls. Below is a list of your available JSON functions:\n\nFunction: send-message-to-user\n Description: \n Send a message to the user.\n \n Args:\n message (str): The message to be sent.\n Function Parameters:\n message (str)\n\nFunction: calculator\n Description: \n Perform a math operation on two numbers.\n Function Parameters:\n number_one (any):\n Description: First number.\n operation (math-operation):\n Description: Math operation to perform.\n number_two (any):\n Description: Second number.\n\nFunction: write-to-file\n Description: \n Write file to the user filesystem.\n :param chain_of_thought: Your chain of thought while writing the file.\n :param file_path: The file path includes the filename and file ending.\n :param file_content: The actual content to write.\n Function Parameters:\n chain_of_thought (str)\n file_path (str)\n file_content (str)\n\nFunction: read-file\n Description: \n Read file from the user filesystem.\n :param file_path: The file path includes the filename and file ending.\n :return: File content.\n Function Parameters:\n file_path (str)\n\nFunction: get-current-weather\n Function Parameters:\n location (str)\n unit (unit or none-type)\n\n",
"k_last_messages_from_chat_history": 0,
"debug_output": true,
"messages": [
{
"role": "user",
"content": "What is the weather in New York?"
},
{
"role": "assistant",
"content": "{\n \"function\": \"get-current-weather\",\n \"function-parameters\": {\n \"location\": \"New York\",\n \"unit\": \"celsius\"\n }\n}"
},
{
"role": "function",
"content": "Function Call Result: {\"location\": \"New York\", \"temperature\": \"24\", \"unit\": \"celsius\"}"
},
{
"role": "assistant",
"content": "{\n \"function\": \"send-message-to-user\",\n \"function-parameters\": {\n \"message\": \"The current temperature in New York is 24 degrees Celsius.\"\n }\n}"
}
],
"custom_messages_formatter": {
"PRE_PROMPT": "",
"SYS_PROMPT_START": "<|im_start|>system\n",
"SYS_PROMPT_END": "<|im_end|>\n",
"USER_PROMPT_START": "<|im_start|>user\n",
"USER_PROMPT_END": "<|im_end|>\n",
"ASSISTANT_PROMPT_START": "<|im_start|>assistant\n",
"ASSISTANT_PROMPT_END": "<|im_end|>\n",
"INCLUDE_SYS_PROMPT_IN_FIRST_USER_MESSAGE": false,
"DEFAULT_STOP_SEQUENCES": [
"<|im_end|>"
],
"FUNCTION_PROMPT_START": "",
"FUNCTION_PROMPT_END": "",
"USE_USER_ROLE_FUNCTION_CALL_RESULT": true,
"STRIP_PROMPT": true
}
}
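The new examples/function_calling_agent.json persists the agent's generation settings, system prompt, message history, and formatter in one JSON document. The loader the library itself uses is not part of this diff, but the file is plain JSON and can be inspected with the standard library alone, as in this sketch:

# Sketch: read the saved agent state with stdlib json only; the library's own
# load/save API is not shown in this commit.
import json

with open("examples/function_calling_agent.json", encoding="utf-8") as f:
    state = json.load(f)

print(state["llama_generation_settings"]["temperature"])  # 0.65
for message in state["messages"]:
    print(f'{message["role"]}: {message["content"][:60]}')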
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "llama-cpp-agent"
- version = "0.0.12"
+ version = "0.0.13"
description = "A framework for building LLM based AI agents with llama-cpp-python."

readme = "ReadMe.md"
17 changes: 9 additions & 8 deletions src/llama_cpp_agent/function_calling.py
@@ -30,11 +30,14 @@ class LlamaCppFunctionTool:
__call__(*args, **kwargs): Calls the Pydantic model with the provided keyword arguments.
"""
def __init__(self, pydantic_model: Type[BaseModel], has_markdown_code_block=False, has_triple_quoted_string=False,
+ markdown_code_block_field_name=None, triple_quoted_string_field_name=None,
**additional_parameters):
self.model = pydantic_model
self.look_for_field_string = has_markdown_code_block or has_triple_quoted_string
self.has_markdown_code_block = has_markdown_code_block
self.has_triple_quoted_string = has_triple_quoted_string
+ self.markdown_code_block_field_name = markdown_code_block_field_name
+ self.triple_quoted_string_field_name = triple_quoted_string_field_name
self.additional_parameters = additional_parameters if additional_parameters else {}

def __call__(self, *args, **kwargs):
@@ -116,17 +119,15 @@ def finalize(self):
Finalize the registry, generating the GBNF grammar and documentation.
"""
pydantic_function_models = []
- look_markdown_code_block = False

for function_tool in self.function_tools.values():
pydantic_function_models.append(function_tool.model)
- if function_tool.look_for_field_string:
-     look_markdown_code_block = True

for function_tool in self.function_tools_containing_field_string.values():
pydantic_function_models.append(function_tool.model)
- if function_tool.look_for_field_string:
-     look_markdown_code_block = True

gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation(
- pydantic_function_models, look_markdown_code_block, look_markdown_code_block, self.tool_root,
+ pydantic_function_models, self.tool_root,
self.tool_rule_content, self.model_prefix,
self.fields_prefix)

@@ -169,9 +170,9 @@ def handle_function_call(self, function_call_response: str):
marker = "'''" if self.function_tools_containing_field_string[name].has_triple_quoted_string else "```"
function_call, content = parse_json_response_with_markdown_code_block_or_triple_quoted_string(function_call_response, marker)
if self.function_tools_containing_field_string[function_call[self.tool_root]].has_markdown_code_block:
function_call[self.tool_rule_content]["markdown_code_block"] = content
function_call[self.tool_rule_content][tool.markdown_code_block_field_name] = content
elif self.function_tools_containing_field_string[function_call[self.tool_root]].has_triple_quoted_string:
function_call[self.tool_rule_content]["triple_quoted_string"] = content
function_call[self.tool_rule_content][tool.triple_quoted_string_field_name] = content

output = self.intern_function_call(function_call, with_markdown_code_block=True)
return output
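With the registry no longer hard-coding field names, handle_function_call writes the captured free-form content back into whichever field the matched tool configured. A condensed sketch of that step, mirroring the diff above:

# Condensed sketch of the re-parenting step in handle_function_call: content
# captured after the JSON body lands in the tool's configured field.
tool = self.function_tools_containing_field_string[function_call[self.tool_root]]
if tool.has_markdown_code_block:
    function_call[self.tool_rule_content][tool.markdown_code_block_field_name] = content
elif tool.has_triple_quoted_string:
    function_call[self.tool_rule_content][tool.triple_quoted_string_field_name] = content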