Merge branch 'develop' v0.1.11
rstrahan committed Nov 7, 2023
2 parents 9c00703 + d845afc commit c56dec2
Showing 4 changed files with 21 additions and 10 deletions.
CHANGELOG.md: 4 additions & 0 deletions
```diff
@@ -6,6 +6,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased]
 
+## [0.1.11] - 2023-11-07
+### Fixed
+- Error in Bedrock QnABotSettingQAPromptTemplate output - prompt does not terminate with `\n\nAssistant:` and generates error from Bedrock - #13
+
 ## [0.1.10] - 2023-10-27
 ### Fixed
 - Prompt bug: question not denoted by an XML tag, so LLM gets confused about what it's answering - PR #13
```
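For background on the 0.1.11 fix: Anthropic's Claude models on Bedrock validate that a text-completion prompt ends with an `\n\nAssistant:` turn, and reject prompts missing that marker. A minimal sketch of the expected shape (the question text and parameter values are illustrative, not taken from this repo):

```python
import json
import boto3

# Claude's text-completion format on Bedrock: a "\n\nHuman:" turn that
# terminates with "\n\nAssistant:" so the model knows where to respond.
prompt = "\n\nHuman: What is QnABot?\n\nAssistant:"

client = boto3.client("bedrock-runtime")
response = client.invoke_model(
    modelId="anthropic.claude-instant-v1",
    body=json.dumps({"prompt": prompt, "max_tokens_to_sample": 256}),
)
print(json.loads(response["body"].read())["completion"])
```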
lambdas/bedrock-embeddings-and-llm/src/lambdahook.py: 14 additions & 7 deletions
```diff
@@ -60,9 +60,15 @@ def get_generate_text(modelId, response):
         raise Exception("Unsupported provider: ", provider)
     return generated_text
 
+def replace_template_placeholders(prompt, event):
+    # history
+    history_array = json.loads(event["req"]["_userInfo"].get("chatMessageHistory","[]"))
+    history_str = '\n'.join(f"{key}: {value}" for item in history_array for key, value in item.items())
+    prompt = prompt.replace("{history}", history_str)
+    # TODO - replace additional prompt template placeholders - eg query, input, session attributes, user info
+    return prompt
+
-def format_prompt(modelId, prompt):
-    # TODO - replace prompt template placeholders - eg query, input, chatHistory, session attributes, user info
+def format_prompt(modelId, prompt):
     provider = modelId.split(".")[0]
     if provider == "anthropic":
         print("Model provider is Anthropic. Checking prompt format.")
@@ -75,10 +81,8 @@ def format_prompt(modelId, prompt):
     print(f"Prompt: {json.dumps(prompt)}")
     return prompt
 
-def get_llm_response(parameters, prompt):
+def get_llm_response(modelId, parameters, prompt):
     global client
-    modelId = parameters.pop("modelId", DEFAULT_MODEL_ID)
-    prompt = format_prompt(modelId, prompt)
     body = get_request_body(modelId, parameters, prompt)
     print("ModelId", modelId, "- Body: ", body)
     if (client is None):
@@ -127,10 +131,13 @@ def lambda_handler(event, context):
     print("Received event: %s" % json.dumps(event))
     # args = {"Prefix":"<Prefix|None>", "Model_params":{"modelId":"anthropic.claude-instant-v1", "max_tokens":256}, "Prompt":"<prompt>"}
     args = get_args_from_lambdahook_args(event)
+    model_params = args.get("Model_params",{})
+    modelId = model_params.pop("modelId", DEFAULT_MODEL_ID)
     # prompt set from args, or from req.question if not specified in args.
     prompt = args.get("Prompt", event["req"]["question"])
-    model_params = args.get("Model_params",{})
-    llm_response = get_llm_response(model_params, prompt)
+    prompt = format_prompt(modelId, prompt)
+    prompt = replace_template_placeholders(prompt, event)
+    llm_response = get_llm_response(modelId, model_params, prompt)
     prefix = args.get("Prefix","LLM Answer:")
     event = format_response(event, llm_response, prefix)
     print("Returning response: %s" % json.dumps(event))
```
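The new `replace_template_placeholders` is easy to exercise in isolation. A small driver, with the event shape inferred from the lookup in the code above (the `Human`/`AI` keys in the stored history are an assumption for illustration):

```python
import json

def replace_template_placeholders(prompt, event):
    # history: stored as a JSON string of single-key dicts, per the code above
    history_array = json.loads(event["req"]["_userInfo"].get("chatMessageHistory","[]"))
    history_str = '\n'.join(f"{key}: {value}" for item in history_array for key, value in item.items())
    prompt = prompt.replace("{history}", history_str)
    return prompt

# Illustrative event; only the fields replace_template_placeholders reads are included.
event = {"req": {"_userInfo": {"chatMessageHistory": json.dumps([
    {"Human": "What is QnABot?"},
    {"AI": "A conversational bot framework for AWS."},
])}}}

print(replace_template_placeholders("Chat history:\n{history}", event))
# Chat history:
# Human: What is QnABot?
# AI: A conversational bot framework for AWS.
```

Note that `lambda_handler` now pops `modelId` out of `Model_params` before formatting, so the prompt is shaped for the correct provider up front and `get_llm_response` no longer mutates its `parameters` argument.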
lambdas/bedrock-embeddings-and-llm/src/settings.py: 1 addition & 1 deletion
```diff
@@ -3,7 +3,7 @@
 
 # Default prompt templates
 AMAZON_GENERATE_QUERY_PROMPT_TEMPLATE = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.<br>Chat History: <br>{history}<br>Follow up question: {input}<br>Standalone question:"""
-AMAZON_QA_PROMPT_TEMPLATE = """<br><br>Human: You are a friendly AI assistant. Answer the question in <question> tags only based on the provided reference passages. Here are reference passages in <references> tags:<br><references><br>{context}<br></references><br>If the references contain the information needed to respond, then write a confident response in under 50 words, quoting the relevant references. <br>Otherwise, if you can make an informed guess based on the reference passages, then write a less confident response in under 50 words, stating your assumptions.<br>Finally, if the references do not have any relevant information, then respond saying \\"Sorry, I don't know\\".<br><question><br>{query}<br></question><br>Assistant: According to the reference passages, in under 50 words:"""
+AMAZON_QA_PROMPT_TEMPLATE = """<br><br>Human: You are a friendly AI assistant. Answer the question in <question> tags only based on the provided reference passages. Here are reference passages in <references> tags:<br><references><br>{context}<br></references><br>If the references contain the information needed to respond, then write a confident response in under 50 words, quoting the relevant references. <br>Otherwise, if you can make an informed guess based on the reference passages, then write a less confident response in under 50 words, stating your assumptions.<br>Finally, if the references do not have any relevant information, then respond saying \\"Sorry, I don't know\\".<br><question><br>{query}<br></question><br><br>Assistant: According to the reference passages, in under 50 words:"""
 ANTHROPIC_GENERATE_QUERY_PROMPT_TEMPLATE = """<br><br>Human: Here is a chat history in <chatHistory> tags:<br><chatHistory><br>{history}<br></chatHistory><br>Human: And here is a follow up question or statement from the human in <followUpMessage> tags:<br><followUpMessage><br>{input}<br></followUpMessage><br>Human: Rephrase the follow up question or statement as a standalone question or statement that makes sense without reading the chat history.<br><br>Assistant: Here is the rephrased follow up question or statement:"""
 ANTHROPIC_QA_PROMPT_TEMPLATE = AMAZON_QA_PROMPT_TEMPLATE
 AI21_GENERATE_QUERY_PROMPT_TEMPATE = ANTHROPIC_GENERATE_QUERY_PROMPT_TEMPLATE
```
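The only change above is the extra `<br>` before `Assistant:`. Assuming the plugin substitutes each literal `<br>` token with a newline when the setting is applied (that substitution happens outside this diff), the rendered template now contains the `\n\nAssistant:` turn marker that Claude on Bedrock requires. A quick check of that assumption:

```python
# Minimal check, assuming "<br>" tokens become newlines when settings are applied.
# The template text is abbreviated; only the tail matters here.
template = "...<br>{query}<br></question><br><br>Assistant: According to the reference passages, in under 50 words:"
rendered = template.replace("<br>", "\n")
assert "\n\nAssistant:" in rendered  # present with <br><br>; absent with a single <br>
```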
lambdas/bedrock-embeddings-and-llm/template.yml: 2 additions & 2 deletions
```diff
@@ -1,5 +1,5 @@
 AWSTemplateFormatVersion: "2010-09-09"
-Description: QnABot on AWS LLM Plugin for Bedrock - v0.1.7
+Description: QnABot on AWS LLM Plugin for Bedrock - v0.1.11
 
 Parameters:
 
@@ -309,7 +309,7 @@ Resources:
       ServiceToken: !GetAtt OutputSettingsFunction.Arn
       EmbeddingsModelId: !Ref EmbeddingsModelId
       LLMModelId: !Ref LLMModelId
-      LastUpdate: '10/25/2023'
+      LastUpdate: '11/07/2023'
 
   TestBedrockModelFunction:
     Type: AWS::Lambda::Function
```
