Update agent wrapper
kcze committed Jul 10, 2024
1 parent a37adf8 commit 846984a
Showing 4 changed files with 71 additions and 265 deletions.
8 changes: 7 additions & 1 deletion autogpt/autogpt/agents/agent.py
@@ -174,13 +174,19 @@ async def propose_action(self) -> OneShotAgentActionProposal:
# Get messages
messages = await self.run_pipeline(MessageProvider.get_messages)

include_os_info = (
self.code_executor.config.execute_local_commands
if hasattr(self, "code_executor")
else False
)

prompt: ChatPrompt = self.prompt_strategy.build_prompt(
messages=messages,
task=self.state.task,
ai_profile=self.state.ai_profile,
ai_directives=directives,
commands=function_specs_from_commands(self.commands),
include_os_info=self.code_executor.config.execute_local_commands,
include_os_info=include_os_info,
)

logger.debug(f"Executing prompt:\n{dump_prompt(prompt)}")
286 changes: 32 additions & 254 deletions rnd/autogpt_server/autogpt_server/data/agent_block.py
@@ -1,81 +1,35 @@
from __future__ import annotations

import asyncio
import inspect
import logging
from pathlib import Path
from typing import TYPE_CHECKING, Any, ClassVar, Iterator, Optional
from typing import TYPE_CHECKING, Iterator

from colorama import Fore, Style
import sentry_sdk
from autogpt.agents.prompt_strategies.one_shot import OneShotAgentActionProposal, OneShotAgentPromptStrategy
from autogpt.agents.agent import Agent, AgentSettings
from autogpt.app.config import ConfigBuilder
from autogpt_server.data.block import Block, BlockData, BlockOutput, BlockSchema
from forge.agent.base import BaseAgent, BaseAgentConfiguration, BaseAgentSettings
from autogpt_server.data.block import Block, BlockOutput, BlockSchema
from forge.agent.protocols import (
AfterExecute,
AfterParse,
CommandProvider,
DirectiveProvider,
MessageProvider,
)
from forge.command import command
from forge.command.command import Command
from forge.components.action_history import (
ActionHistoryComponent,
EpisodicActionHistory,
)
from forge.components.action_history.action_history import ActionHistoryConfiguration
from forge.components.code_executor.code_executor import (
CodeExecutorComponent,
CodeExecutorConfiguration,
)
from forge.components.context.context import AgentContext, ContextComponent
from forge.components.file_manager import FileManagerComponent
from forge.components.git_operations import GitOperationsComponent
from forge.components.image_gen import ImageGeneratorComponent
from forge.components.system import SystemComponent
from forge.components.user_interaction import UserInteractionComponent
from forge.components.watchdog import WatchdogComponent
from forge.components.web import WebSearchComponent, WebSeleniumComponent
from forge.file_storage import FileStorageBackendName, get_storage
from forge.file_storage.base import FileStorage
from forge.llm.prompting.schema import ChatPrompt
from forge.llm.prompting.utils import dump_prompt
from forge.llm.providers import (
AssistantFunctionCall,
ChatMessage,
ChatModelResponse,
MultiProvider,
)
from forge.llm.providers.utils import function_specs_from_commands
from forge.models.action import (
ActionErrorResult,
ActionInterruptedByHuman,
ActionResult,
ActionSuccessResult,
)
from forge.models.config import Configurable
from forge.models.action import ActionErrorResult
from forge.models.json_schema import JSONSchema
from forge.utils.exceptions import (
AgentException,
AgentTerminated,
CommandExecutionError,
UnknownCommandError,
)
from pydantic import Field
from pydantic import BaseModel, Field

if TYPE_CHECKING:
from autogpt.app.config import AppConfig

logger = logging.getLogger(__name__)


class AgentSettings(BaseAgentSettings):
history: EpisodicActionHistory[OneShotAgentActionProposal] = Field(
default_factory=EpisodicActionHistory[OneShotAgentActionProposal]
)
"""(STATE) The action history of the agent."""
class BlockAgentSettings(AgentSettings):
disabled_components: list[str] = Field(default_factory=list)


class OutputComponent(CommandProvider):
@@ -96,214 +50,39 @@ def output(self, output: str) -> str:
return output


class SimpleAgent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
default_settings: ClassVar[AgentSettings] = AgentSettings(
name="Agent",
description=__doc__ if __doc__ else "",
)

class BlockAgent(Agent):
def __init__(
self,
settings: AgentSettings,
settings: BlockAgentSettings,
llm_provider: MultiProvider,
file_storage: FileStorage,
app_config: AppConfig,
):
super().__init__(settings)
super().__init__(settings, llm_provider, file_storage, app_config)

self.llm_provider = llm_provider
prompt_config = OneShotAgentPromptStrategy.default_configuration.model_copy(deep=True)
prompt_config.use_functions_api = (
settings.config.use_functions_api
# Anthropic currently doesn't support tools + prefilling :(
and self.llm.provider_name != "anthropic"
)
self.prompt_strategy = OneShotAgentPromptStrategy(prompt_config, logger)
self.commands: list[Command] = []

# Components
self.system = SystemComponent()
self.output = OutputComponent()
# self.history = ActionHistoryComponent(
# settings.history,
# lambda x: self.llm_provider.count_tokens(x, self.llm.name),
# llm_provider,
# ActionHistoryConfiguration(
# model_name=app_config.fast_llm, max_tokens=self.send_token_limit
# ),
# ).run_after(WatchdogComponent)
# self.file_manager = FileManagerComponent(file_storage, settings)
# self.web_search = WebSearchComponent()
# self.web_selenium = WebSeleniumComponent(
# llm_provider,
# app_config.app_data_dir,
# )
self.watchdog = WatchdogComponent(settings.config, settings.history).run_after(
ContextComponent
)

self.event_history = settings.history
self.app_config = app_config


async def propose_action(self) -> OneShotAgentActionProposal:
"""Proposes the next action to execute, based on the task and current state.

Returns:
The command name and arguments, if any, and the agent's thoughts.
"""
self.reset_trace()
# Disable components
for attr_name in list(self.__dict__.keys()):
attr_value = getattr(self, attr_name)
if type(attr_value).__name__ in settings.disabled_components:
delattr(self, attr_name)

# Get directives
resources = await self.run_pipeline(DirectiveProvider.get_resources)
constraints = await self.run_pipeline(DirectiveProvider.get_constraints)
best_practices = await self.run_pipeline(DirectiveProvider.get_best_practices)

directives = self.state.directives.model_copy(deep=True)
directives.resources += resources
directives.constraints += constraints
directives.best_practices += best_practices

# Get commands
self.commands = await self.run_pipeline(CommandProvider.get_commands)
self._remove_disabled_commands()

print(f"#########---------> commands: {self.commands}")

# Get messages
messages = await self.run_pipeline(MessageProvider.get_messages)

prompt: ChatPrompt = self.prompt_strategy.build_prompt(
messages=messages,
task=self.state.task,
ai_profile=self.state.ai_profile,
ai_directives=directives,
commands=function_specs_from_commands(self.commands),
include_os_info=False,
)

logger.debug(f"Executing prompt:\n{dump_prompt(prompt)}")
output = await self.complete_and_parse(prompt)
self.config.cycle_count += 1

print(f"#########---------> output: {output}")

return output

async def complete_and_parse(
self, prompt: ChatPrompt, exception: Optional[Exception] = None
) -> OneShotAgentActionProposal:
if exception:
prompt.messages.append(ChatMessage.system(f"Error: {exception}"))

response: ChatModelResponse[
OneShotAgentActionProposal
] = await self.llm_provider.create_chat_completion(
prompt.messages,
model_name=self.llm.name,
completion_parser=self.prompt_strategy.parse_response_content,
functions=prompt.functions,
prefill_response=prompt.prefill_response,
)
result = response.parsed_result

await self.run_pipeline(AfterParse.after_parse, result)

return result

async def execute(
self,
proposal: OneShotAgentActionProposal,
user_feedback: str = "",
) -> ActionResult:
tool = proposal.use_tool

# Get commands
self.commands = await self.run_pipeline(CommandProvider.get_commands)
self._remove_disabled_commands()

try:
return_value = await self._execute_tool(tool)

result = ActionSuccessResult(outputs=return_value)
except AgentTerminated:
raise
except AgentException as e:
result = ActionErrorResult.from_exception(e)
logger.warning(f"{tool} raised an error: {e}")
sentry_sdk.capture_exception(e)

result_tlength = self.llm_provider.count_tokens(str(result), self.llm.name)
if result_tlength > self.send_token_limit // 3:
result = ActionErrorResult(
reason=f"Command {tool.name} returned too much output. "
"Do not execute this command again with the same arguments."
)

await self.run_pipeline(AfterExecute.after_execute, result)

logger.debug("\n".join(self.trace))

return result

async def do_not_execute(
self, denied_proposal: OneShotAgentActionProposal, user_feedback: str
) -> ActionResult:
result = ActionInterruptedByHuman(feedback=user_feedback)

await self.run_pipeline(AfterExecute.after_execute, result)

logger.debug("\n".join(self.trace))

return result

async def _execute_tool(self, tool_call: AssistantFunctionCall) -> Any:
"""Execute the command and return the result
Args:
tool_call (AssistantFunctionCall): The tool call to execute
Returns:
str: The execution result
"""
# Execute a native command with the same name or alias, if it exists
command = self._get_command(tool_call.name)
try:
result = command(**tool_call.arguments)
if inspect.isawaitable(result):
return await result
return result
except AgentException:
raise
except Exception as e:
raise CommandExecutionError(str(e))

def _get_command(self, command_name: str) -> Command:
for command in reversed(self.commands):
if command_name in command.names:
return command

raise UnknownCommandError(
f"Cannot execute command '{command_name}': unknown command."
)

def _remove_disabled_commands(self) -> None:
self.commands = [
command
for command in self.commands
if not any(
name in self.app_config.disabled_commands for name in command.names
)
]

class AutoGPTOutput(BaseModel):
result: str
tool: str

class AutoGPTAgentBlock(Block):
class Input(BlockSchema):
task: str
input: str
disabled_components: list[str] = Field(default_factory=list)
disabled_commands: list[str] = Field(default_factory=list)

class Output(BlockSchema):
output: str
output: AutoGPTOutput
# error: str

def __init__(self):
super().__init__(
@@ -315,6 +94,8 @@ def __init__(self):
def run(self, input_data: Input) -> BlockOutput:
# Set up configuration
config = ConfigBuilder.build_config_from_env()
# Disable commands
config.disabled_commands.extend(input_data.disabled_commands)

# Storage
local = config.file_storage_backend == FileStorageBackendName.LOCAL
@@ -333,34 +114,31 @@ def run(self, input_data: Input) -> BlockOutput:
multi_provider.get_model_provider(model)

# State
state = AgentSettings(
state = BlockAgentSettings(
agent_id="TemporaryAgentID",
name="WrappedAgent",
description="Wrapped agent for the Agent Server.",
task=f"Your task: {input_data.task}\n"
f"Input data: {input_data.input}",
disabled_components=input_data.disabled_components,
)
# Disable slow models
state.config.big_brain = False

agent = SimpleAgent(state, multi_provider, file_storage, config)
print(f"{Fore.GREEN}Agent created{Style.RESET_ALL}")
agent = BlockAgent(state, multi_provider, file_storage, config)

# Execute agent
for tries in range(3):
try:
print(f"{Fore.CYAN}Proposing action...{Style.RESET_ALL}")
proposal = asyncio.run(agent.propose_action())
print(f"{Fore.GREEN}Proposal created{Style.RESET_ALL}")
break
except Exception as e:
print(f"{Fore.YELLOW}Proposal failed: {e}{Style.RESET_ALL}")
if tries == 2:
print(f"{Fore.RED}Failed to create proposal{Style.RESET_ALL}")
raise e

print(f"{Fore.GREEN}Proposal executed{Style.RESET_ALL}")

result = asyncio.run(agent.execute(proposal))
print(f"{Fore.GREEN}Command executed{Style.RESET_ALL}")

yield "output", str(result)
# if isinstance(result, ActionErrorResult):
# yield "error", str(result)
# else:
yield "output", AutoGPTOutput(result=str(result), tool=proposal.use_tool.name)
