refactor(agent): Refactor & improve create_chat_completion (Significant-Gravitas#7082)

* refactor(agent/core): Rearrange and split up `OpenAIProvider.create_chat_completion`
   - Rearrange to reduce complexity, improve separation/abstraction of concerns, and allow multiple points of failure during parsing
   - Move conversion from `ChatMessage` to `openai.types.ChatCompletionMessageParam` to `_get_chat_completion_args`
   - Move token usage and cost tracking boilerplate code to `_create_chat_completion`
   - Move tool call conversion/parsing to `_parse_assistant_tool_calls` (new)
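
  A shape-only sketch of how this split could look is below. The three helper names come from this commit message; the signatures, return values, and stub bodies are illustrative assumptions, not the actual `OpenAIProvider` code.

  ```python
  from typing import Any


  class OpenAIProvider:  # sketch only; the real class has many more members
      def _get_chat_completion_args(self, prompt_messages, model_name, functions=None, **kwargs):
          # Convert ChatMessage objects into OpenAI ChatCompletionMessageParam dicts
          # and assemble the keyword arguments for the API call.
          raise NotImplementedError

      async def _create_chat_completion(self, messages, **kwargs):
          # Make the API call; token usage and cost tracking boilerplate lives here.
          raise NotImplementedError

      def _parse_assistant_tool_calls(self, assistant_message) -> tuple[list[Any], list[Exception]]:
          # Convert/parse tool calls, collecting per-call errors instead of aborting.
          raise NotImplementedError

      async def create_chat_completion(self, prompt_messages, model_name, functions=None, **kwargs):
          messages, completion_kwargs = self._get_chat_completion_args(
              prompt_messages, model_name, functions, **kwargs
          )
          assistant_message = await self._create_chat_completion(messages, **completion_kwargs)
          tool_calls, parse_errors = self._parse_assistant_tool_calls(assistant_message)
          if parse_errors:
              # Parsing can now fail per tool call; surface all failures together.
              raise ValueError("Failed to parse tool calls", *parse_errors)
          return assistant_message, tool_calls
  ```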

* fix(agent/core): Handle decoding of function call arguments in `create_chat_completion`
   - Amend `model_providers.schema`: change type of `arguments` from `str` to `dict[str, Any]` on `AssistantFunctionCall` and `AssistantFunctionCallDict`
   - Implement robust and transparent parsing in `OpenAIProvider._parse_assistant_tool_calls`
   - Remove now unnecessary `json_loads` calls throughout codebase
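
  A minimal sketch of the new argument decoding, assuming the helper receives an assistant message with OpenAI-style `tool_calls`. Only the `json_loads` call and the str-to-dict conversion are taken from the commit message; the plain-dict result shape and the error wrapping below are illustrative.

  ```python
  from typing import Any

  from autogpt.core.utils.json_utils import json_loads


  def _parse_assistant_tool_calls(assistant_message) -> tuple[list[dict[str, Any]], list[Exception]]:
      """Sketch: decode each tool call's raw JSON `arguments` string into a dict,
      collecting errors instead of aborting on the first bad call."""
      tool_calls: list[dict[str, Any]] = []
      parse_errors: list[Exception] = []

      for tc in assistant_message.tool_calls or []:
          try:
              arguments = json_loads(tc.function.arguments)  # str -> dict[str, Any]
              tool_calls.append(
                  {
                      "id": tc.id,
                      "type": tc.type,
                      "function": {"name": tc.function.name, "arguments": arguments},
                  }
              )
          except Exception as e:
              # Transparent failure: record which call failed and why, keep going.
              parse_errors.append(
                  ValueError(f"Could not decode arguments of tool call {tc.id!r}: {e}")
              )
      return tool_calls, parse_errors
  ```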

* feat(agent/utils): Improve conditions and errors in `json_loads`
   - Include all decoding errors when raising a ValueError on decode failure
   - Use errors returned by `return_errors` instead of an error buffer
   - Fix check for decode failure
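
  For reference, the improved `json_loads` might look roughly like the sketch below. It assumes a demjson3-based implementation whose `decode(..., return_errors=True)` returns the parsed object together with the errors it collected; the exact sentinels and attribute names are assumptions, not verified against the actual file.

  ```python
  import logging
  from typing import Any

  import demjson3  # assumed: the fault-tolerant JSON parser behind json_loads

  logger = logging.getLogger(__name__)


  def json_loads(json_str: str) -> Any:
      # return_errors=True makes decode() hand back the errors it collected,
      # so no separate error buffer is needed.
      json_result = demjson3.decode(json_str, return_errors=True)
      assert json_result is not None

      if json_result.errors:
          # Non-fatal issues are logged rather than silently dropped.
          logger.debug("JSON parse errors:\n" + "\n".join(str(e) for e in json_result.errors))

      if json_result.object in (demjson3.syntax_error, demjson3.undefined):
          # Decode failure: include *all* decoding errors in the raised ValueError.
          raise ValueError(f"Failed to parse JSON string: {json_str}", *json_result.errors)

      return json_result.object
  ```
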
Pwuts committed Apr 16, 2024
1 parent d7f00a9 commit 7082e63
Showing 8 changed files with 208 additions and 113 deletions.
autogpts/autogpt/autogpt/agent_factory/profile_generator.py (5 changes: 1 addition & 4 deletions)

@@ -15,7 +15,6 @@
     CompletionModelFunction,
 )
 from autogpt.core.utils.json_schema import JSONSchema
-from autogpt.core.utils.json_utils import json_loads

 logger = logging.getLogger(__name__)

@@ -203,9 +202,7 @@ def parse_response_content(
                 f"LLM did not call {self._create_agent_function.name} function; "
                 "agent profile creation failed"
             )
-        arguments: object = json_loads(
-            response_content.tool_calls[0].function.arguments
-        )
+        arguments: object = response_content.tool_calls[0].function.arguments
         ai_profile = AIProfile(
             ai_name=arguments.get("name"),
             ai_role=arguments.get("description"),
autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py (4 changes: 2 additions & 2 deletions)

@@ -26,7 +26,7 @@
     CompletionModelFunction,
 )
 from autogpt.core.utils.json_schema import JSONSchema
-from autogpt.core.utils.json_utils import extract_dict_from_json, json_loads
+from autogpt.core.utils.json_utils import extract_dict_from_json
 from autogpt.prompts.utils import format_numbered_list, indent


@@ -436,7 +436,7 @@ def extract_command(
             raise InvalidAgentResponseError("No 'tool_calls' in assistant reply")
         assistant_reply_json["command"] = {
             "name": assistant_reply.tool_calls[0].function.name,
-            "args": json_loads(assistant_reply.tool_calls[0].function.arguments),
+            "args": assistant_reply.tool_calls[0].function.arguments,
         }
         try:
             if not isinstance(assistant_reply_json, dict):
(third changed file; file path not shown in this view)

@@ -11,7 +11,6 @@
     CompletionModelFunction,
 )
 from autogpt.core.utils.json_schema import JSONSchema
-from autogpt.core.utils.json_utils import json_loads

 logger = logging.getLogger(__name__)

@@ -195,9 +194,7 @@ def parse_response_content(
                 f"LLM did not call {self._create_plan_function.name} function; "
                 "plan creation failed"
             )
-        parsed_response: object = json_loads(
-            response_content.tool_calls[0].function.arguments
-        )
+        parsed_response: object = response_content.tool_calls[0].function.arguments
         parsed_response["task_list"] = [
             Task.parse_obj(task) for task in parsed_response["task_list"]
         ]
(fourth changed file; file path not shown in this view)

@@ -9,7 +9,6 @@
     CompletionModelFunction,
 )
 from autogpt.core.utils.json_schema import JSONSchema
-from autogpt.core.utils.json_utils import json_loads

 logger = logging.getLogger(__name__)

@@ -141,9 +140,7 @@ def parse_response_content(
                     f"LLM did not call {self._create_agent_function} function; "
                     "agent profile creation failed"
                 )
-            parsed_response = json_loads(
-                response_content.tool_calls[0].function.arguments
-            )
+            parsed_response = response_content.tool_calls[0].function.arguments
         except KeyError:
             logger.debug(f"Failed to parse this response content: {response_content}")
             raise
(fifth changed file; file path not shown in this view)

@@ -11,7 +11,6 @@
     CompletionModelFunction,
 )
 from autogpt.core.utils.json_schema import JSONSchema
-from autogpt.core.utils.json_utils import json_loads

 logger = logging.getLogger(__name__)

@@ -188,9 +187,7 @@ def parse_response_content(
             raise ValueError("LLM did not call any function")

         function_name = response_content.tool_calls[0].function.name
-        function_arguments = json_loads(
-            response_content.tool_calls[0].function.arguments
-        )
+        function_arguments = response_content.tool_calls[0].function.arguments
         parsed_response = {
             "motivation": function_arguments.pop("motivation"),
             "self_criticism": function_arguments.pop("self_criticism"),