From ccad47c99388d66abbdc18748a2bb55d3b1cce9a Mon Sep 17 00:00:00 2001 From: shanurrahman Date: Wed, 6 Dec 2023 09:12:35 +0300 Subject: [PATCH] fixing edge cases --- llm-server/custom_types/bot_message.py | 3 ++- llm-server/routes/root_service.py | 6 ++++++ .../workflow/extractors/convert_json_to_text.py | 12 ++++++------ .../workflow/utils/process_conversation_step.py | 7 ++++--- llm-server/routes/workflow/utils/run_openapi_ops.py | 13 +++++++++---- 5 files changed, 27 insertions(+), 14 deletions(-) diff --git a/llm-server/custom_types/bot_message.py b/llm-server/custom_types/bot_message.py index e07a83069..b77b6c0e2 100644 --- a/llm-server/custom_types/bot_message.py +++ b/llm-server/custom_types/bot_message.py @@ -1,10 +1,11 @@ -from typing import List +from typing import List, Optional from langchain.pydantic_v1 import BaseModel, Field from langchain.output_parsers import PydanticOutputParser class BotMessage(BaseModel): bot_message: str = Field(description="Message from the bot") ids: List[str] = Field(description="List of IDs") + missing_information: Optional[str] = Field(description="In case of ambiguity, ask the user a follow-up question") # Set up a parser + inject instructions into the prompt template. 
diff --git a/llm-server/routes/root_service.py b/llm-server/routes/root_service.py index c9b1963d6..27bcc726b 100644 --- a/llm-server/routes/root_service.py +++ b/llm-server/routes/root_service.py @@ -86,6 +86,12 @@ async def handle_request( bot_id=bot_id, base_prompt=base_prompt ) + + if step.missing_information is not None and len(step.missing_information) >= 10: + return { + "error": None, + "response": step.missing_information + } if len(step.ids) > 0: response = await handle_api_calls( diff --git a/llm-server/routes/workflow/extractors/convert_json_to_text.py b/llm-server/routes/workflow/extractors/convert_json_to_text.py index 112f417e1..36cf19439 100644 --- a/llm-server/routes/workflow/extractors/convert_json_to_text.py +++ b/llm-server/routes/workflow/extractors/convert_json_to_text.py @@ -22,7 +22,7 @@ def convert_json_to_text( api_summarizer_template = None system_message = SystemMessage( - content="You are a chatbot that can understand API responses" + content="You are an AI assistant that can summarize API responses" ) prompt_templates = load_prompts(bot_id) api_summarizer_template = ( @@ -35,15 +35,15 @@ def convert_json_to_text( messages = [ system_message, HumanMessage( - content="You'll receive user input and server responses obtained by making calls to various APIs. You will also recieve a dictionary that specifies, the body, param and query param used to make those api calls. Your task is to transform the JSON response into a response that is an answer to the user input. You should inform the user about the filters that were used to make these api calls. Try to respond in 3 sentences or less, unless there is too much to summarize." + content="You'll receive user input and server responses obtained by making calls to various APIs. Your task is to summarize the API response that is an answer to the user input. Try to be concise and accurate, and also include references if present." 
), - HumanMessage(content="Here is the user input: {}.".format(user_input)), + HumanMessage(content=user_input), HumanMessage( content="Here is the response from the apis: {}".format(api_response) ), - HumanMessage( - content="Here is the api_request_data: {}".format(api_request_data) - ), + # HumanMessage( + # content="Here is the api_request_data: {}".format(api_request_data) + # ), ] result = chat(messages) diff --git a/llm-server/routes/workflow/utils/process_conversation_step.py b/llm-server/routes/workflow/utils/process_conversation_step.py index 0ce9b3394..0e3d08f81 100644 --- a/llm-server/routes/workflow/utils/process_conversation_step.py +++ b/llm-server/routes/workflow/utils/process_conversation_step.py @@ -81,7 +81,8 @@ def process_conversation_step( content="""Based on the information provided to you I want you to answer the questions that follow. Your should respond with a json that looks like the following - {{ "ids": ["list", "of", "operationIds", "for apis to be called"], - "bot_message": "your response based on the instructions provided at the beginning" + "bot_message": "your response based on the instructions provided at the beginning", + "missing_information": "Optional Field; In case of ambiguity, where user input is not sufficient to make the API call, ask follow-up questions. 
A follow-up question should only be asked once per user input" }} """ ) @@ -107,7 +108,7 @@ def process_conversation_step( except OutputParserException as e: logger.error("Failed to parse json", data=content) logger.error("Failed to parse json", err=str(e)) - return BotMessage(bot_message=content, ids=[]) + return BotMessage(bot_message=content, ids=[], missing_information=None) except Exception as e: logger.error("unexpected error occured", err=str(e)) - return BotMessage(ids=[], bot_message=str(e)) + return BotMessage(ids=[], bot_message=str(e), missing_information=None) diff --git a/llm-server/routes/workflow/utils/run_openapi_ops.py b/llm-server/routes/workflow/utils/run_openapi_ops.py index d64cc31bb..e135e8945 100644 --- a/llm-server/routes/workflow/utils/run_openapi_ops.py +++ b/llm-server/routes/workflow/utils/run_openapi_ops.py @@ -68,14 +68,19 @@ async def run_openapi_operations( partial_json = load_json_config(app, operation_id) if not partial_json: logger.error( - "Failed to find a config map. Consider adding a config map for this operation id", + "Config map is not defined for this operationId", incident="config_map_undefined", operation_id=operation_id, app=app ) - record_info[operation_id] = transform_api_response_from_schema( - api_payload.endpoint or "", api_response.text - ) + record_info[operation_id] = api_response.text + + # Removed this because this slows down the bot response instead of speeding it up + # record_info[operation_id] = transform_api_response_from_schema( + # api_payload.endpoint or "", api_response.text + # ) + + pass else: logger.info( "API Response",