Merge branch 'develop' v0.1.15
rstrahan committed Mar 7, 2024
2 parents 69d2606 + 127c86a commit 054f9f7
Showing 6 changed files with 66 additions and 19 deletions.
7 changes: 6 additions & 1 deletion CHANGELOG.md
@@ -6,6 +6,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased]

+## [0.1.15] - 2024-03-07
+### Added
+- Amazon Bedrock LLM plugin now supports the anthropic.claude-3-sonnet model and deprecates anthropic.claude-v1 - PR #23.
+
## [0.1.14] - 2023-12-22
### Added
- Amazon Q Business Expert plugin now supports optional file attachments via the Lex Web UI (v0.20.4) attach option and the new userFileUpload session attribute - PR #23.
@@ -81,7 +85,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Initial release

[Unreleased]: https://github.com/aws-samples/qnabot-on-aws-plugin-samples/compare/v0.1.14...develop
-[0.1.14]: https://github.com/aws-samples/qnabot-on-aws-plugin-samples/releases/tag/v0.1.13
+[0.1.15]: https://github.com/aws-samples/qnabot-on-aws-plugin-samples/releases/tag/v0.1.15
+[0.1.14]: https://github.com/aws-samples/qnabot-on-aws-plugin-samples/releases/tag/v0.1.14
[0.1.13]: https://github.com/aws-samples/qnabot-on-aws-plugin-samples/releases/tag/v0.1.13
[0.1.12]: https://github.com/aws-samples/qnabot-on-aws-plugin-samples/releases/tag/v0.1.12
[0.1.11]: https://github.com/aws-samples/qnabot-on-aws-plugin-samples/releases/tag/v0.1.11
12 changes: 12 additions & 0 deletions lambdas/anthropic-llm/src/settings.py
@@ -13,6 +13,18 @@ def getModelSettings(model):
"minTokens": 0,
"topP": 1
}
# claude-3 message API params are slightly different
provider = modelId.split(".")[0]
if provider == "anthropic":
if modelId.startswith("anthropic.claude-3"):
params = {
"model": model,
"temperature": 0,
"max_tokens": 256,
"top_p": 1,
"system": "You are a helpful AI assistant."
}

settings = {
'LLM_GENERATE_QUERY_MODEL_PARAMS': json.dumps(params),
'LLM_QA_MODEL_PARAMS': json.dumps(params),
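
The claude-3 branch above switches the Anthropic parameter names to the messages-API style (max_tokens, top_p, and an optional system prompt). A minimal standalone sketch of that selection logic, assuming the base params dict from the context lines (getModelSettings itself is only partially visible in this hunk):

    import json

    def build_anthropic_params(model):
        # Base (pre-claude-3) parameter names, per the context lines above;
        # other base fields are not visible in this hunk.
        params = {"model": model, "temperature": 0, "minTokens": 0, "topP": 1}
        # claude-3 models take messages-API parameter names instead
        if model.startswith("anthropic.claude-3"):
            params = {
                "model": model,
                "temperature": 0,
                "max_tokens": 256,
                "top_p": 1,
                "system": "You are a helpful AI assistant."
            }
        return params

    print(json.dumps(build_anthropic_params("anthropic.claude-3-sonnet-20240229-v1:0")))
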
2 changes: 1 addition & 1 deletion lambdas/bedrock-embeddings-and-llm/src/lambdahook.py
@@ -90,7 +90,7 @@ def format_prompt(modelId, prompt):
    provider = modelId.split(".")[0]
    if provider == "anthropic":
        print("Model provider is Anthropic. Checking prompt format.")
-        if not prompt.startswith("\n\nHuman:"):
+        if not prompt.startswith("\n\nHuman:") and not prompt.startswith("\n\nSystem:"):
            prompt = "\n\nHuman: " + prompt
            print("Prepended '\\n\\nHuman:'")
        if not prompt.endswith("\n\nAssistant:"):
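
The net behavior here is to coerce free-form text into Anthropic's Human/Assistant turn format. A quick sketch of just that guard logic, reimplemented standalone (the rest of format_prompt is not shown in this hunk):

    def wrap_anthropic_prompt(prompt):
        # Prepend a Human turn unless the prompt already opens with a Human or System turn
        if not prompt.startswith("\n\nHuman:") and not prompt.startswith("\n\nSystem:"):
            prompt = "\n\nHuman: " + prompt
        # Ensure the prompt ends with an empty Assistant turn for the model to complete
        if not prompt.endswith("\n\nAssistant:"):
            prompt = prompt + "\n\nAssistant:"
        return prompt

    print(repr(wrap_anthropic_prompt("Why is the sky blue?")))
    # '\n\nHuman: Why is the sky blue?\n\nAssistant:'
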
37 changes: 24 additions & 13 deletions lambdas/bedrock-embeddings-and-llm/src/llm.py
@@ -20,11 +20,20 @@ def get_request_body(modelId, parameters, prompt):
    provider = modelId.split(".")[0]
    request_body = None
    if provider == "anthropic":
-        request_body = {
-            "prompt": prompt,
-            "max_tokens_to_sample": DEFAULT_MAX_TOKENS
-        }
-        request_body.update(parameters)
+        # claude-3 models use new messages format
+        if modelId.startswith("anthropic.claude-3"):
+            request_body = {
+                "anthropic_version": "bedrock-2023-05-31",
+                "messages": [{"role": "user", "content": [{'type':'text','text': prompt}]}],
+                "max_tokens": DEFAULT_MAX_TOKENS
+            }
+            request_body.update(parameters)
+        else:
+            request_body = {
+                "prompt": prompt,
+                "max_tokens_to_sample": DEFAULT_MAX_TOKENS
+            }
+            request_body.update(parameters)
    elif provider == "ai21":
        request_body = {
            "prompt": prompt,
@@ -59,20 +68,21 @@ def get_request_body(modelId, parameters, prompt):
def get_generate_text(modelId, response):
    provider = modelId.split(".")[0]
    generated_text = None
+    response_body = json.loads(response.get("body").read())
+    print("Response body: ", json.dumps(response_body))
    if provider == "anthropic":
-        response_body = json.loads(response.get("body").read().decode())
-        generated_text = response_body.get("completion")
+        # claude-3 models use new messages format
+        if modelId.startswith("anthropic.claude-3"):
+            generated_text = response_body.get("content")[0].get("text")
+        else:
+            generated_text = response_body.get("completion")
    elif provider == "ai21":
-        response_body = json.loads(response.get("body").read())
        generated_text = response_body.get("completions")[0].get("data").get("text")
    elif provider == "amazon":
-        response_body = json.loads(response.get("body").read())
        generated_text = response_body.get("results")[0].get("outputText")
    elif provider == "cohere":
-        response_body = json.loads(response.get('body').read())
        generated_text = response_body.get("generations")[0].get("text")
    elif provider == "meta":
-        response_body = json.loads(response.get('body').read())
        generated_text = response_body.get("generation")
    else:
        raise Exception("Unsupported provider: ", provider)
@@ -95,8 +105,9 @@ def call_llm(parameters, prompt):
    {
        "prompt": "\n\nHuman:Why is the sky blue?\n\nAssistant:",
        "parameters": {
-            "modelId": "anthropic.claude-v1",
-            "temperature": 0
+            "modelId": "anthropic.claude-3-sonnet-20240229-v1:0",
+            "temperature": 0,
+            "system": "You are an AI assistant that always answers in rhyming couplets"
        }
    }
    For supported parameters for each provider model, see Bedrock docs: https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/providers
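
Taken together, get_request_body() and get_generate_text() give the claude-3 request/response round trip. A minimal end-to-end sketch against the Bedrock runtime, assuming boto3 is configured with credentials and a region where the model is enabled (DEFAULT_MAX_TOKENS is given an illustrative value; the module's real default is not shown in this diff):

    import json
    import boto3

    DEFAULT_MAX_TOKENS = 256  # illustrative; actual module default not shown in this diff
    bedrock = boto3.client("bedrock-runtime")

    def invoke_claude3(prompt, modelId="anthropic.claude-3-sonnet-20240229-v1:0"):
        # Build the messages-format body, mirroring get_request_body()
        request_body = {
            "anthropic_version": "bedrock-2023-05-31",
            "messages": [{"role": "user", "content": [{"type": "text", "text": prompt}]}],
            "max_tokens": DEFAULT_MAX_TOKENS,
            "temperature": 0
        }
        response = bedrock.invoke_model(
            modelId=modelId,
            contentType="application/json",
            accept="application/json",
            body=json.dumps(request_body)
        )
        # Parse the messages-format response, mirroring get_generate_text()
        response_body = json.loads(response.get("body").read())
        return response_body.get("content")[0].get("text")

    print(invoke_claude3("Why is the sky blue?"))
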
21 changes: 20 additions & 1 deletion lambdas/bedrock-embeddings-and-llm/src/settings.py
@@ -33,10 +33,29 @@ def getModelSettings(modelId):
"modelId": modelId,
"temperature": 0
}
params_qa = params.copy()
# claude-3 message API params are slightly different
provider = modelId.split(".")[0]
if provider == "anthropic":
if modelId.startswith("anthropic.claude-3"):
params = {
"modelId": modelId,
"temperature": 0,
"max_tokens": 256,
"top_p": 1
}
# add optional system prompt to qa params
params_qa = {
"modelId": modelId,
"temperature": 0,
"max_tokens": 256,
"top_p": 1,
"system": "You are a helpful AI assistant."
}
lambdahook_args = {"Prefix":"LLM Answer:", "Model_params": params}
settings = {
'LLM_GENERATE_QUERY_MODEL_PARAMS': json.dumps(params),
'LLM_QA_MODEL_PARAMS': json.dumps(params),
'LLM_QA_MODEL_PARAMS': json.dumps(params_qa),
'QNAITEM_LAMBDAHOOK_ARGS': json.dumps(lambdahook_args)
}
provider = modelId.split(".")[0]
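
The params_qa split means the QA setting now carries the system prompt while the query-generation setting does not. A hedged sketch of the resulting setting values for a claude-3 model (key names are from the diff; the rest of getModelSettings is not reproduced):

    import json

    params = {
        "modelId": "anthropic.claude-3-sonnet-20240229-v1:0",
        "temperature": 0,
        "max_tokens": 256,
        "top_p": 1
    }
    params_qa = dict(params, system="You are a helpful AI assistant.")
    lambdahook_args = {"Prefix": "LLM Answer:", "Model_params": params}

    settings = {
        'LLM_GENERATE_QUERY_MODEL_PARAMS': json.dumps(params),   # no system prompt
        'LLM_QA_MODEL_PARAMS': json.dumps(params_qa),            # system prompt included
        'QNAITEM_LAMBDAHOOK_ARGS': json.dumps(lambdahook_args)
    }
    for key, value in settings.items():
        print(key, '=', value)
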
6 changes: 3 additions & 3 deletions lambdas/bedrock-embeddings-and-llm/template.yml
@@ -1,5 +1,5 @@
AWSTemplateFormatVersion: "2010-09-09"
-Description: QnABot on AWS LLM Plugin for Bedrock - v0.1.13
+Description: QnABot on AWS LLM Plugin for Bedrock - v0.1.15

Parameters:

@@ -18,10 +18,10 @@ Parameters:
      - amazon.titan-text-lite-v1
      - ai21.j2-ultra-v1
      - ai21.j2-mid-v1
-      - anthropic.claude-v1
      - anthropic.claude-instant-v1
      - anthropic.claude-v2
      - anthropic.claude-v2:1
+      - anthropic.claude-3-sonnet-20240229-v1:0
      - cohere.command-text-v14
      - cohere.command-light-text-v14
      - meta.llama2-13b-chat-v1
@@ -303,7 +303,7 @@ Resources:
      ServiceToken: !GetAtt OutputSettingsFunction.Arn
      EmbeddingsModelId: !Ref EmbeddingsModelId
      LLMModelId: !Ref LLMModelId
-      LastUpdate: '11/07/2023'
+      LastUpdate: '03/07/2024 11:45'

  TestBedrockModelFunction:
    Type: AWS::Lambda::Function
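
To pick up the newly listed model on an already-deployed plugin stack, a parameter-only CloudFormation update is one option. A sketch, assuming a hypothetical stack name and that anthropic.claude-3-sonnet access has been granted in the Bedrock console (any other stack parameters would also need UsePreviousValue entries):

    import boto3

    cfn = boto3.client("cloudformation")
    cfn.update_stack(
        StackName="QNABOT-BEDROCK-PLUGIN",  # hypothetical stack name
        UsePreviousTemplate=True,           # keep the deployed template as-is
        Parameters=[
            {"ParameterKey": "LLMModelId",
             "ParameterValue": "anthropic.claude-3-sonnet-20240229-v1:0"},
            {"ParameterKey": "EmbeddingsModelId", "UsePreviousValue": True},
        ],
        Capabilities=["CAPABILITY_IAM"],
    )
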
