Skip to content

Commit

Permalink
AutoGPT: Implement Agent Protocol (#5612)
Browse files Browse the repository at this point in the history
  • Loading branch information
Pwuts authored Oct 18, 2023
2 parents 6a05e11 + ae9fc68 commit bceb66f
Show file tree
Hide file tree
Showing 101 changed files with 3,723 additions and 2,871 deletions.
19 changes: 1 addition & 18 deletions .github/workflows/autogpt-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,19 +6,16 @@ on:
paths:
- 'autogpts/autogpt/**'
- '!autogpts/autogpt/tests/vcr_cassettes'
- '!autogpts/autogpt/tests/challenges/current_score.json'
pull_request:
branches: [ stable, master, release-* ]
paths:
- 'autogpts/autogpt/**'
- '!autogpts/autogpt/tests/vcr_cassettes'
- '!autogpts/autogpt/tests/challenges/current_score.json'
pull_request_target:
branches: [ master, release-*, ci-test* ]
paths:
- 'autogpts/autogpt/**'
- '!autogpts/autogpt/tests/vcr_cassettes'
- '!autogpts/autogpt/tests/challenges/current_score.json'

concurrency:
group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
Expand Down Expand Up @@ -169,8 +166,7 @@ jobs:
poetry run pytest -vv \
--cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
--numprocesses=logical --durations=10 \
tests/unit tests/integration tests/challenges
poetry run python tests/challenges/utils/build_current_score.py
tests/unit tests/integration
env:
CI: true
PROXY: ${{ github.event_name == 'pull_request_target' && secrets.PROXY || '' }}
Expand Down Expand Up @@ -199,19 +195,6 @@ jobs:
echo "config_key=$config_key" >> $GITHUB_OUTPUT
- name: Push updated challenge scores
if: github.event_name == 'push'
run: |
score_file="tests/challenges/current_score.json"
if ! git diff --quiet $score_file; then
git add $score_file
git commit -m "Update challenge scores"
git push origin HEAD:${{ github.ref_name }}
else
echo "The challenge scores didn't change."
fi
- id: push_cassettes
name: Push updated cassettes
# For pull requests, push updated cassettes even when tests fail
Expand Down
2 changes: 0 additions & 2 deletions .github/workflows/autogpt-docker-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,13 +6,11 @@ on:
paths:
- 'autogpts/autogpt/**'
- '!autogpts/autogpt/tests/vcr_cassettes'
- '!autogpts/autogpt/tests/challenges/current_score.json'
pull_request:
branches: [ master, release-*, stable ]
paths:
- 'autogpts/autogpt/**'
- '!autogpts/autogpt/tests/vcr_cassettes'
- '!autogpts/autogpt/tests/challenges/current_score.json'

concurrency:
group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
Expand Down
1 change: 0 additions & 1 deletion .github/workflows/pr-label.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@ on:
branches: [ master, release-* ]
paths-ignore:
- 'autogpts/autogpt/tests/vcr_cassettes'
- 'autogpts/autogpt/tests/challenges/current_score.json'
- 'benchmark/reports/**'
# So that the `dirtyLabel` is removed if conflicts are resolved
# We recommend `pull_request_target` so that github secrets are available.
Expand Down
2 changes: 2 additions & 0 deletions autogpts/autogpt/.gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ log-ingestion.txt
*.mp3
mem.sqlite3
venvAutoGPT
data/*

# Byte-compiled / optimized / DLL files
__pycache__/
Expand Down Expand Up @@ -163,6 +164,7 @@ CURRENT_BULLETIN.md

# AgBenchmark
agbenchmark_config/reports/
agbenchmark_config/workspace/

# Nodejs
package-lock.json
Expand Down
4 changes: 2 additions & 2 deletions autogpts/autogpt/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -36,12 +36,12 @@ CMD []

# dev build -> include everything
FROM autogpt-base as autogpt-dev
RUN poetry install --no-root --without benchmark
RUN poetry install --no-root
ONBUILD COPY . ./

# release build -> include bare minimum
FROM autogpt-base as autogpt-release
RUN poetry install --no-root --without dev,benchmark
RUN poetry install --no-root --without dev
ONBUILD COPY autogpt/ ./autogpt
ONBUILD COPY scripts/ ./scripts
ONBUILD COPY plugins/ ./plugins
Expand Down
25 changes: 13 additions & 12 deletions autogpts/autogpt/agbenchmark_config/benchmarks.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,10 @@
from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.app.main import _configure_openai_provider, run_interaction_loop
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIConfig, ConfigBuilder
from autogpt.config import AIProfile, ConfigBuilder
from autogpt.logs.config import configure_logging
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.workspace import Workspace

PROJECT_DIR = Path().resolve()
LOG_DIR = Path(__file__).parent / "logs"


Expand All @@ -21,22 +18,24 @@ def run_specific_agent(task: str, continuous_mode: bool = False) -> None:


def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
config = ConfigBuilder.build_config_from_env(workdir=PROJECT_DIR)
config = ConfigBuilder.build_config_from_env()
config.debug_mode = False
config.continuous_mode = continuous_mode
config.continuous_limit = 20
config.temperature = 0
config.noninteractive_mode = True
config.plain_output = True
config.memory_backend = "no_memory"
config.workspace_path = Workspace.init_workspace_directory(config)
config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)

configure_logging(config, LOG_DIR)
configure_logging(
debug_mode=config.debug_mode,
plain_output=config.plain_output,
log_dir=LOG_DIR,
)

command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)

ai_config = AIConfig(
ai_profile = AIProfile(
ai_name="AutoGPT",
ai_role="a multi-purpose AI assistant.",
ai_goals=[task],
Expand All @@ -47,24 +46,26 @@ def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
agent_settings = AgentSettings(
name=Agent.default_settings.name,
description=Agent.default_settings.description,
ai_config=ai_config,
ai_profile=ai_profile,
config=AgentConfiguration(
fast_llm=config.fast_llm,
smart_llm=config.smart_llm,
allow_fs_access=not config.restrict_to_workspace,
use_functions_api=config.openai_functions,
plugins=config.plugins,
),
prompt_config=agent_prompt_config,
history=Agent.default_settings.history.copy(deep=True),
)

return Agent(
agent = Agent(
settings=agent_settings,
llm_provider=_configure_openai_provider(config),
command_registry=command_registry,
memory=get_memory(config),
legacy_config=config,
)
agent.attach_fs(config.app_data_dir / "agents" / "AutoGPT-benchmark") # HACK
return agent


if __name__ == "__main__":
Expand Down
9 changes: 8 additions & 1 deletion autogpts/autogpt/agbenchmark_config/config.json
Original file line number Diff line number Diff line change
@@ -1 +1,8 @@
{"workspace": {"input": "auto_gpt_workspace", "output":"auto_gpt_workspace" }, "entry_path": "agbenchmark.benchmarks"}
{
"workspace": {
"input": "agbenchmark_config/workspace",
"output": "agbenchmark_config/workspace"
},
"entry_path": "agbenchmark.benchmarks",
"host": "http://localhost:8000"
}
2 changes: 1 addition & 1 deletion autogpts/autogpt/autogpt/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,4 @@
import autogpt.app.cli

if __name__ == "__main__":
autogpt.app.cli.main()
autogpt.app.cli.cli()
116 changes: 116 additions & 0 deletions autogpts/autogpt/autogpt/agent_factory/configurators.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
from typing import Optional

from autogpt.agent_manager import AgentManager
from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIDirectives, AIProfile, Config
from autogpt.core.resource.model_providers import ChatModelProvider
from autogpt.logs.config import configure_chat_plugins
from autogpt.logs.helpers import print_attribute
from autogpt.models.command_registry import CommandRegistry
from autogpt.plugins import scan_plugins


def create_agent(
    task: str,
    ai_profile: AIProfile,
    app_config: Config,
    llm_provider: ChatModelProvider,
    directives: Optional[AIDirectives] = None,
) -> Agent:
    """Build a fresh agent for *task* and assign it a newly generated agent ID.

    When *directives* is not given, the defaults are loaded from
    ``app_config.prompt_settings_file``.

    Raises:
        ValueError: if *task* is empty.
    """
    if not task:
        raise ValueError("No task specified for new agent")

    # Fall back to the directives defined in the prompt settings file.
    effective_directives = (
        directives if directives else AIDirectives.from_file(app_config.prompt_settings_file)
    )

    new_agent = _configure_agent(
        task=task,
        ai_profile=ai_profile,
        directives=effective_directives,
        app_config=app_config,
        llm_provider=llm_provider,
    )

    # Fresh agents get an ID derived from their profile name.
    new_agent.state.agent_id = AgentManager.generate_id(new_agent.ai_profile.ai_name)

    return new_agent


def configure_agent_with_state(
    state: AgentSettings,
    app_config: Config,
    llm_provider: ChatModelProvider,
) -> Agent:
    """Reconstruct an ``Agent`` from previously saved settings (*state*)."""
    return _configure_agent(
        app_config=app_config,
        llm_provider=llm_provider,
        state=state,
    )


def _configure_agent(
    app_config: Config,
    llm_provider: ChatModelProvider,
    task: str = "",
    ai_profile: Optional[AIProfile] = None,
    directives: Optional[AIDirectives] = None,
    state: Optional[AgentSettings] = None,
) -> Agent:
    """Assemble an ``Agent`` either from an existing *state* or from a
    (task, ai_profile, directives) triple.

    Side effects: scans plugins and stores them on *app_config*, and
    configures chat plugins, before the agent is constructed.

    Raises:
        TypeError: when neither *state* nor the full
            (task, ai_profile, directives) triple is supplied.
    """
    # De Morgan form of: not (state or (task and ai_profile and directives))
    if not state and not (task and ai_profile and directives):
        raise TypeError(
            "Either (state) or (task, ai_profile, directives) must be specified"
        )

    app_config.plugins = scan_plugins(app_config, app_config.debug_mode)
    configure_chat_plugins(app_config)

    # Registry pre-loaded with the default command modules.
    registry = CommandRegistry.with_command_modules(
        modules=COMMAND_CATEGORIES,
        config=app_config,
    )

    if state:
        settings = state
    else:
        settings = create_agent_state(
            task=task,
            ai_profile=ai_profile,
            directives=directives,
            app_config=app_config,
        )

    # TODO: configure memory

    print_attribute("Configured Browser", app_config.selenium_web_browser)

    return Agent(
        settings=settings,
        llm_provider=llm_provider,
        command_registry=registry,
        legacy_config=app_config,
    )


def create_agent_state(
    task: str,
    ai_profile: AIProfile,
    directives: AIDirectives,
    app_config: Config,
) -> AgentSettings:
    """Derive a fresh ``AgentSettings`` from the agent defaults, the given
    task/profile/directives, and the relevant knobs on *app_config*."""
    prompt_config = Agent.default_settings.prompt_config.copy(deep=True)
    # Mirror the functions-API switch into the prompt configuration.
    prompt_config.use_functions_api = app_config.openai_functions

    agent_config = AgentConfiguration(
        fast_llm=app_config.fast_llm,
        smart_llm=app_config.smart_llm,
        allow_fs_access=not app_config.restrict_to_workspace,
        use_functions_api=app_config.openai_functions,
        plugins=app_config.plugins,
    )

    return AgentSettings(
        name=Agent.default_settings.name,
        description=Agent.default_settings.description,
        task=task,
        ai_profile=ai_profile,
        directives=directives,
        config=agent_config,
        prompt_config=prompt_config,
        history=Agent.default_settings.history.copy(deep=True),
    )
31 changes: 31 additions & 0 deletions autogpts/autogpt/autogpt/agent_factory/generators.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
from autogpt.agents.agent import Agent
from autogpt.config import Config
from autogpt.core.resource.model_providers.schema import ChatModelProvider

from autogpt.config.ai_directives import AIDirectives

from .configurators import _configure_agent
from .profile_generator import generate_agent_profile_for_task


async def generate_agent_for_task(
    task: str,
    app_config: "Config",
    llm_provider: "ChatModelProvider",
) -> "Agent":
    """Use the LLM to generate a profile and directives for *task*, then
    build the corresponding agent."""
    file_directives = AIDirectives.from_file(app_config.prompt_settings_file)
    profile, extra_directives = await generate_agent_profile_for_task(
        task=task,
        app_config=app_config,
        llm_provider=llm_provider,
    )
    # Baseline directives from file, extended with the task-specific ones.
    merged_directives = file_directives + extra_directives
    return _configure_agent(
        task=task,
        ai_profile=profile,
        directives=merged_directives,
        app_config=app_config,
        llm_provider=llm_provider,
    )
Loading

0 comments on commit bceb66f

Please sign in to comment.