diff --git a/chatbot/bot/conversation/ctx_strategy.py b/chatbot/bot/conversation/ctx_strategy.py
index fbd5536..656d9da 100644
--- a/chatbot/bot/conversation/ctx_strategy.py
+++ b/chatbot/bot/conversation/ctx_strategy.py
@@ -79,7 +79,7 @@ def generate_response(
             max_new_tokens (int, optional): Maximum number of tokens for the generated response. Default is 512.
 
         Returns:
-            Union[str, Any]: The generated response or a response generator.
+            Any: A response generator.
         """
         cur_response = None
 
@@ -124,7 +124,7 @@ def __init__(self, llm: LlmClient):
 
     def generate_response(
         self, retrieved_contents: List[Document], question: str, max_new_tokens: int = 512, num_children: int = 2
-    ) -> Union[str, Any]:
+    ) -> Any:
         """
         Generate a response using hierarchical summarization strategy.
 
@@ -136,7 +136,7 @@ def generate_response(
             num_children (int, optional): Number of child nodes to create for the response. Default is 2.
 
         Returns:
-            Union[str, Any]: The generated response.
+            Any: A response generator.
         """
         fmt_prompts = []
         node_responses = []
@@ -177,7 +177,7 @@ def combine_results(
             num_children (int, optional): Number of child nodes to create for the response. Default is 2.
 
         Returns:
-            Any: The combined response.
+            Any: A response generator.
         """
         fmt_prompts = []
         new_texts = []
@@ -235,7 +235,7 @@ async def generate_response(
             num_children (int, optional): The number of child nodes to create for the response. Default is 2.
 
         Returns:
-            Any: The combined response.
+            Any: A response generator.
         """
         fmt_prompts = []
         for idx, content in enumerate(retrieved_contents, start=1):
@@ -276,7 +276,7 @@ async def combine_results(
             num_children (int, optional): Number of child nodes to create for the response. Default is 2.
 
         Returns:
-            Any: The combined response.
+            Any: A response generator.
        """
        fmt_prompts = []
        for idx in range(0, len(texts), num_children):
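
A minimal sketch of the contract this patch settles on: every `generate_response` variant now always returns a response generator, so callers iterate over tokens instead of branching on `isinstance(response, str)`. Everything below (`FakeLlm`, the prompt layout) is a hypothetical stand-in for the repo's `LlmClient` and `Document` types, not code from this diff:

```python
# Illustration only (not part of the patch): consuming the generator that
# generate_response() now returns, per the docstring changes above.
from typing import Any, Iterator, List


class FakeLlm:
    """Hypothetical stand-in for LlmClient: yields tokens like a streaming backend."""

    def stream(self, prompt: str) -> Iterator[str]:
        yield from ("Hello", " ", "world")


def generate_response(retrieved_contents: List[str], question: str) -> Any:
    # Mirrors the patched contract: always return a generator, never a str.
    prompt = f"{retrieved_contents}\n\nQ: {question}"
    return FakeLlm().stream(prompt)


# Callers no longer need Union[str, Any] handling; they just iterate.
answer = "".join(generate_response(["some context"], "What is this?"))
print(answer)  # -> "Hello world"
```

Aligning all strategies on a single generator return type lets the caller use one streaming loop, which is presumably why the `Union[str, Any]` annotations and "combined response" wording were dropped.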