From d01b6e1115ab3dbf5e2283b84f354dff6dba260a Mon Sep 17 00:00:00 2001
From: FayazRahman
Date: Sun, 30 Apr 2023 05:34:51 +0530
Subject: [PATCH] init

---
 loopgpt/agent.md      | 46 +++++++++++++++++++++++++++++++++++++++++++
 loopgpt/agent.py      | 44 +++++++++++++++++++++++++++++++----------
 loopgpt/constants.py  |  6 ++++++
 loopgpt/loops/repl.py |  6 ++----
 4 files changed, 88 insertions(+), 14 deletions(-)
 create mode 100644 loopgpt/agent.md

diff --git a/loopgpt/agent.md b/loopgpt/agent.md
new file mode 100644
index 0000000..6f97066
--- /dev/null
+++ b/loopgpt/agent.md
@@ -0,0 +1,46 @@
+# Agent
+
+This file describes how to use Agent objects.
+
+## Agent State
+
+An Agent's state can be one of the following:
+- `START`: Agent is initialized.
+- `IDLE`: No tool is staged and the Agent is waiting for input.
+- `TOOL_STAGED`: Agent has staged a tool for execution.
+- `STOP`: Agent has executed `task_complete` and will not accept further messages.
+
+## Initialize Agent
+
+An agent can be initialized using:
+```python
+import loopgpt
+agent = loopgpt.Agent()
+```
+
+## Chat with Agent
+
+`agent.chat()` handles sending prompts to the agent and executing staged commands. It returns the Agent's response (see [loopgpt/constants.py](https://github.com/farizrahman4u/loopgpt/blob/main/loopgpt/constants.py) for the response format).
+
+It takes two arguments:
+- **`message: Optional[str]`**: The message to send to the agent. Defaults to `None`.
+- **`run_tool: bool`**: If `True`, any staged tool is executed. Defaults to `False`.
+
+### Prompts
+
+Two kinds of prompts are prepended to the `message` argument:
+- **`agent.init_prompt`**: Prepended to the first message, i.e., while the Agent is in the `START` state.
+- **`agent.next_prompt`**: Prepended to all subsequent messages.
+
+### Staged Tool
+
+The staged tool (if any) can be accessed through `agent.staging_tool`, and the response that staged the tool is stored in `agent.staging_response`.
+You can inspect the name and arguments of the staged tool using `agent.staging_tool.get("name")` and `agent.staging_tool.get("args")` respectively.
+
+To run the staged tool:
+
+```python
+agent.chat(run_tool=True)
+```
+
+The tool's response can be found at `agent.tool_response`.
diff --git a/loopgpt/agent.py b/loopgpt/agent.py
index 11d1167..f144618 100644
--- a/loopgpt/agent.py
+++ b/loopgpt/agent.py
@@ -5,6 +5,7 @@
     DEFAULT_AGENT_DESCRIPTION,
     NEXT_PROMPT,
     NEXT_PROMPT_SMALL,
+    AgentStates
 )
 from loopgpt.memory import from_config as memory_from_config
 from loopgpt.models import OpenAIModel, from_config as model_from_config
@@ -59,6 +60,7 @@ def __init__(
         self.progress = []
         self.plan = []
         self.constraints = []
+        self.state = AgentStates.START
 
     def _get_non_user_messages(self, n):
         msgs = [
@@ -130,14 +132,26 @@ def _get_compressed_history(self):
         user_msgs = [i for i in range(len(hist)) if hist[i]["role"] == "user"]
         hist = [hist[i] for i in range(len(hist)) if i not in user_msgs]
         return hist
+
+    def get_full_message(self, message: Optional[str]):
+        if self.state == AgentStates.START:
+            return self.init_prompt + "\n\n" + (message or "")
+        else:
+            return self.next_prompt + "\n\n" + (message or "")
 
     @spinner
-    def chat(self, message: Optional[str] = None, run_tool=False) -> Union[str, Dict]:
-        if message is None:
-            message = self.init_prompt
+    def chat(self, message: Optional[str] = None, run_tool=False) -> Optional[Union[str, Dict]]:
+        if self.state == AgentStates.STOP:
+            raise ValueError(
+                "This agent has completed its tasks. It will not accept any more messages."
+                " You can do `agent.clear_state()` to start over with the same goals."
+            )
+        message = self.get_full_message(message)
         if self.staging_tool:
             tool = self.staging_tool
             if run_tool:
+                output = self.run_staging_tool()
+                self.tool_response = output
                 if tool.get("name") == "task_complete":
                     self.history.append(
                         {
@@ -145,9 +159,8 @@ def chat(self, message: Optional[str] = None, run_tool=False) -> Union[str, Dict
                             "role": "system",
                             "content": "Completed all user specified tasks.",
                         }
                     )
-                    message = ""
-                output = self.run_staging_tool()
-                self.tool_response = output
+                    self.state = AgentStates.STOP
+                    return
                 if tool.get("name") != "do_nothing":
                     pass  # TODO We dont have enough space for this in gpt3
@@ -186,11 +199,19 @@ def chat(self, message: Optional[str] = None, run_tool=False) -> Union[str, Dict
         ):
             self.staging_tool = {"name": "task_complete", "args": {}}
             self.staging_response = resp
+            self.state = AgentStates.STOP
         else:
-            if "name" in resp:
-                resp = {"command": resp}
-            self.staging_tool = resp["command"]
-            self.staging_response = resp
+            if isinstance(resp, dict):
+                if "name" in resp:
+                    resp = {"command": resp}
+                if "command" in resp:
+                    self.staging_tool = resp["command"]
+                    self.staging_response = resp
+                    self.state = AgentStates.TOOL_STAGED
+                else:
+                    self.state = AgentStates.IDLE
+            else:
+                self.state = AgentStates.IDLE
 
         progress = resp.get("thoughts", {}).get("progress")
         if progress:
@@ -321,6 +342,7 @@ def clear_state(self):
         self.staging_response = None
         self.tool_response = None
         self.progress = None
+        self.state = AgentStates.START
         self.history.clear()
         self.sub_agents.clear()
         self.memory.clear()
@@ -407,6 +429,7 @@ def config(self, include_state=True):
             "description": self.description,
             "goals": self.goals[:],
             "constraints": self.constraints[:],
+            "state": self.state,
             "model": self.model.config(),
             "temperature": self.temperature,
             "tools": [tool.config() for tool in self.tools.values()],
@@ -435,6 +458,7 @@ def from_config(cls, config):
         agent.description = config["description"]
         agent.goals = config["goals"][:]
         agent.constraints = config["constraints"][:]
+        agent.state = config["state"]
         agent.temperature = config["temperature"]
         agent.model = model_from_config(config["model"])
         agent.tools = {tool.id: tool for tool in map(tool_from_config, config["tools"])}
diff --git a/loopgpt/constants.py b/loopgpt/constants.py
index 51fafd4..0e105e6 100644
--- a/loopgpt/constants.py
+++ b/loopgpt/constants.py
@@ -86,6 +86,12 @@
     "Always execute plans to completion",
 ]
 
+class AgentStates:
+    START = "START"
+    IDLE = "IDLE"
+    TOOL_STAGED = "TOOL_STAGED"
+    STOP = "STOP"
+
 # SPINNER
 SPINNER_ENABLED = True
 SPINNER_START_DELAY = 2
diff --git a/loopgpt/loops/repl.py b/loopgpt/loops/repl.py
index 7c5e140..d28cb7c 100644
--- a/loopgpt/loops/repl.py
+++ b/loopgpt/loops/repl.py
@@ -171,7 +171,7 @@ def cli(agent, continuous=False):
                     if cmd == "task_complete":
                         return
                     print_line("system", f"Executing command: {cmd}")
-                    resp = agent.chat(agent.next_prompt, True)
+                    resp = agent.chat(run_tool=True)
                     print_line("system", f"{cmd} output: {agent.tool_response}")
                 elif yn == "n":
                     feedback = input(
@@ -179,8 +179,6 @@ def cli(agent, continuous=False):
                     )
                     if feedback.lower().strip() == "exit":
                         return
-                    next_prompt = agent.next_prompt
-                    feedback = next_prompt + "\n\n" + feedback
                     resp = agent.chat(feedback, False)
             write_divider()
             continue
@@ -188,4 +186,4 @@ def cli(agent, continuous=False):
         inp = input(INPUT_PROMPT)
         if inp.lower().strip() == "exit":
            return
-        resp = agent.chat(agent.next_prompt + "\n\n" + inp)
+        resp = agent.chat(inp)
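
For reference, a minimal sketch of the chat/staging flow this patch introduces. It assumes the patched `loopgpt` is installed with a configured OpenAI key, and the message text is purely illustrative:

```python
import loopgpt
from loopgpt.constants import AgentStates

# A new agent starts in the START state.
agent = loopgpt.Agent()

# First chat: init_prompt is prepended automatically. If the model stages a
# tool, the agent moves to TOOL_STAGED; otherwise it goes to IDLE.
resp = agent.chat("Summarize the latest AI news.")

if agent.state == AgentStates.TOOL_STAGED:
    # Inspect the staged tool before deciding to run it.
    print(agent.staging_tool.get("name"), agent.staging_tool.get("args"))
    agent.chat(run_tool=True)   # execute the staged tool
    print(agent.tool_response)  # output of the executed tool

# Once task_complete runs, the agent enters STOP and further chat() calls
# raise ValueError; clear_state() resets it to START with the same goals.
if agent.state == AgentStates.STOP:
    agent.clear_state()
```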