Commit 964cd36

Merge branch 'develop' v0.1.13

rstrahan committed Dec 7, 2023
2 parents 3a2ed33 + fe54f8d
Showing 5 changed files with 69 additions and 14 deletions.
CHANGELOG.md (13 changes: 12 additions & 1 deletion)
@@ -6,6 +6,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased]

+## [0.1.13] - 2023-12-06
+### Added
+- Bedrock plugin updates to support new text models - #22
+  - amazon.titan-text-lite-v1
+  - anthropic.claude-v2:1
+  - cohere.command-text-v14
+  - cohere.command-light-text-v14
+  - meta.llama2-13b-chat-v1
+  - meta.llama2-70b-chat-v1
+
## [0.1.12] - 2023-11-29
### Added
- Amazon Q, your business expert, now integrates with QnABot as a fallback answer source, using QnABot's Lambda hooks with CustomNoMatches/no_hits. For more information see: [QnABot LambdaHook for Amazon Q, your business expert (preview)](./lambdas/qna_bot_qbusiness_lambdahook/README.md)
@@ -65,7 +75,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added
- Initial release

-[Unreleased]: https://github.com/aws-samples/qnabot-on-aws-plugin-samples/compare/v0.1.12...develop
+[Unreleased]: https://github.com/aws-samples/qnabot-on-aws-plugin-samples/compare/v0.1.13...develop
+[0.1.13]: https://github.com/aws-samples/qnabot-on-aws-plugin-samples/releases/tag/v0.1.13
[0.1.12]: https://github.com/aws-samples/qnabot-on-aws-plugin-samples/releases/tag/v0.1.12
[0.1.11]: https://github.com/aws-samples/qnabot-on-aws-plugin-samples/releases/tag/v0.1.11
[0.1.10]: https://github.com/aws-samples/qnabot-on-aws-plugin-samples/releases/tag/v0.1.10
lambdas/bedrock-embeddings-and-llm/src/lambdahook.py (18 changes: 18 additions & 0 deletions)
@@ -40,6 +40,18 @@ def get_request_body(modelId, parameters, prompt):
"inputText": prompt,
"textGenerationConfig": textGenerationConfig
}
elif provider == "cohere":
request_body = {
"prompt": prompt,
"max_tokens": DEFAULT_MAX_TOKENS
}
request_body.update(parameters)
elif provider == "meta":
request_body = {
"prompt": prompt,
"max_gen_len": DEFAULT_MAX_TOKENS
}
request_body.update(parameters)
else:
raise Exception("Unsupported provider: ", provider)
return request_body
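The two new branches differ only in the token-limit key each provider expects: Cohere's Command models take `max_tokens`, while Meta's Llama 2 models take `max_gen_len`, and in both cases caller-supplied `parameters` override the default. A minimal sketch of the payloads this produces (values and the `temperature` parameter are illustrative; `DEFAULT_MAX_TOKENS` is defined elsewhere in the module):

```python
import json

DEFAULT_MAX_TOKENS = 256  # illustrative stand-in for the module-level default

def sketch_body(provider, prompt, parameters):
    # Mirrors the new branches above: same prompt, provider-specific limit key.
    key = "max_tokens" if provider == "cohere" else "max_gen_len"
    body = {"prompt": prompt, key: DEFAULT_MAX_TOKENS}
    body.update(parameters)  # caller-supplied settings override the default
    return json.dumps(body)

print(sketch_body("cohere", "Hello", {"temperature": 0.5}))
# -> {"prompt": "Hello", "max_tokens": 256, "temperature": 0.5}
print(sketch_body("meta", "Hello", {}))
# -> {"prompt": "Hello", "max_gen_len": 256}
```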
@@ -56,6 +68,12 @@ def get_generate_text(modelId, response):
elif provider == "amazon":
response_body = json.loads(response.get("body").read())
generated_text = response_body.get("results")[0].get("outputText")
elif provider == "cohere":
response_body = json.loads(response.get('body').read())
generated_text = response_body.get("generations")[0].get("text")
elif provider == "meta":
response_body = json.loads(response.get('body').read())
generated_text = response_body.get("generation")
else:
raise Exception("Unsupported provider: ", provider)
return generated_text
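Paired with the request-body branch above, this is enough to round-trip one of the newly supported models. A hedged end-to-end sketch (assumes Bedrock model access is enabled in the target account and region; the prompt and token limit are illustrative):

```python
import json
import boto3

bedrock = boto3.client("bedrock-runtime")  # region must have Bedrock enabled

# Request body for the "meta" provider, as built by get_request_body above.
body = json.dumps({"prompt": "Say hello in one sentence.", "max_gen_len": 128})

response = bedrock.invoke_model(
    modelId="meta.llama2-13b-chat-v1",
    contentType="application/json",
    accept="application/json",
    body=body,
)

# Response parsing for the "meta" provider, as in get_generate_text above:
# Llama 2 returns a single "generation" string rather than a list of candidates.
print(json.loads(response["body"].read())["generation"])
```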
lambdas/bedrock-embeddings-and-llm/src/llm.py (18 changes: 18 additions & 0 deletions)
@@ -40,6 +40,18 @@ def get_request_body(modelId, parameters, prompt):
"inputText": prompt,
"textGenerationConfig": textGenerationConfig
}
elif provider == "cohere":
request_body = {
"prompt": prompt,
"max_tokens": DEFAULT_MAX_TOKENS
}
request_body.update(parameters)
elif provider == "meta":
request_body = {
"prompt": prompt,
"max_gen_len": DEFAULT_MAX_TOKENS
}
request_body.update(parameters)
else:
raise Exception("Unsupported provider: ", provider)
return request_body
@@ -56,6 +68,12 @@ def get_generate_text(modelId, response):
elif provider == "amazon":
response_body = json.loads(response.get("body").read())
generated_text = response_body.get("results")[0].get("outputText")
elif provider == "cohere":
response_body = json.loads(response.get('body').read())
generated_text = response_body.get("generations")[0].get("text")
elif provider == "meta":
response_body = json.loads(response.get('body').read())
generated_text = response_body.get("generation")
else:
raise Exception("Unsupported provider: ", provider)
return generated_text
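The two parsers handle differently shaped responses: Cohere returns a list of candidate `generations`, while Meta returns a single `generation` string. A small self-contained sketch with illustrative payloads:

```python
import json

def parse_generated_text(provider, raw_body):
    # Mirrors the new get_generate_text branches above.
    response_body = json.loads(raw_body)
    if provider == "cohere":
        return response_body.get("generations")[0].get("text")
    if provider == "meta":
        return response_body.get("generation")
    raise Exception("Unsupported provider: ", provider)

# Illustrative payloads showing each provider's response shape:
print(parse_generated_text("cohere", '{"generations": [{"text": "Hi from Command."}]}'))
print(parse_generated_text("meta", '{"generation": "Hi from Llama 2."}'))
```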
lambdas/bedrock-embeddings-and-llm/src/settings.py (14 changes: 14 additions & 0 deletions)
@@ -8,6 +8,10 @@
ANTHROPIC_QA_PROMPT_TEMPLATE = AMAZON_QA_PROMPT_TEMPLATE
AI21_GENERATE_QUERY_PROMPT_TEMPATE = ANTHROPIC_GENERATE_QUERY_PROMPT_TEMPLATE
AI21_QA_PROMPT_TEMPLATE = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. Documents: {context} Instruction: Based on the above documents, provide a detailed answer for {query} Answer "don't know" if not present in the document. Solution:"""
+COHERE_GENERATE_QUERY_PROMPT_TEMPLATE = AMAZON_GENERATE_QUERY_PROMPT_TEMPLATE
+COHERE_QA_PROMPT_TEMPLATE = AMAZON_QA_PROMPT_TEMPLATE
+META_GENERATE_QUERY_PROMPT_TEMPLATE = AMAZON_GENERATE_QUERY_PROMPT_TEMPLATE
+META_QA_PROMPT_TEMPLATE = AMAZON_QA_PROMPT_TEMPLATE

def getEmbeddingSettings(modelId):
provider = modelId.split(".")[0]
@@ -51,6 +55,16 @@ def getModelSettings(modelId):
'LLM_GENERATE_QUERY_PROMPT_TEMPLATE': AMAZON_GENERATE_QUERY_PROMPT_TEMPLATE,
'LLM_QA_PROMPT_TEMPLATE': AMAZON_QA_PROMPT_TEMPLATE
})
elif provider == "cohere":
settings.update({
'LLM_GENERATE_QUERY_PROMPT_TEMPLATE': COHERE_GENERATE_QUERY_PROMPT_TEMPLATE,
'LLM_QA_PROMPT_TEMPLATE': COHERE_QA_PROMPT_TEMPLATE
})
elif provider == "meta":
settings.update({
'LLM_GENERATE_QUERY_PROMPT_TEMPLATE': META_GENERATE_QUERY_PROMPT_TEMPLATE,
'LLM_QA_PROMPT_TEMPLATE': META_QA_PROMPT_TEMPLATE
})
else:
raise Exception("Unsupported provider: ", provider)
return settings
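Because the new constants alias the Amazon templates, Cohere and Meta models inherit the same prompts for now, and any provider-specific tuning later only needs to touch one constant. A sketch of the resulting dispatch (template strings abbreviated to placeholders):

```python
AMAZON_QA_PROMPT_TEMPLATE = "<amazon QA template>"  # placeholder, not the real string
COHERE_QA_PROMPT_TEMPLATE = AMAZON_QA_PROMPT_TEMPLATE
META_QA_PROMPT_TEMPLATE = AMAZON_QA_PROMPT_TEMPLATE

def qa_template(modelId):
    # Same provider dispatch as getModelSettings above, written as a table.
    provider = modelId.split(".")[0]
    return {
        "amazon": AMAZON_QA_PROMPT_TEMPLATE,
        "cohere": COHERE_QA_PROMPT_TEMPLATE,
        "meta": META_QA_PROMPT_TEMPLATE,
    }[provider]

# The two new providers currently resolve to the Amazon template:
assert qa_template("meta.llama2-70b-chat-v1") == qa_template("amazon.titan-text-lite-v1")
```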
lambdas/bedrock-embeddings-and-llm/template.yml (20 changes: 7 additions & 13 deletions)
@@ -1,5 +1,5 @@
AWSTemplateFormatVersion: "2010-09-09"
-Description: QnABot on AWS LLM Plugin for Bedrock - v0.1.11
+Description: QnABot on AWS LLM Plugin for Bedrock - v0.1.13

Parameters:

@@ -15,11 +15,17 @@ Parameters:
    Default: anthropic.claude-instant-v1
    AllowedValues:
      - amazon.titan-text-express-v1
+      - amazon.titan-text-lite-v1
      - ai21.j2-ultra-v1
      - ai21.j2-mid-v1
      - anthropic.claude-v1
      - anthropic.claude-instant-v1
      - anthropic.claude-v2
+      - anthropic.claude-v2:1
+      - cohere.command-text-v14
+      - cohere.command-light-text-v14
+      - meta.llama2-13b-chat-v1
+      - meta.llama2-70b-chat-v1
    Description: Bedrock LLM ModelId

Resources:
@@ -72,18 +78,6 @@ Resources:
      Role: !GetAtt 'BedrockBoto3ZipFunctionRole.Arn'
      Timeout: 60
      MemorySize: 512
-      Environment:
-        Variables:
-          BOTO3_BUCKET: !Ref BedrockBoto3Bucket
-      Code:
-        ZipFile: |
-    Type: AWS::Lambda::Function
-    Properties:
-      Handler: index.handler
-      Runtime: python3.11
-      Role: !GetAtt 'BedrockBoto3ZipFunctionRole.Arn'
-      Timeout: 60
-      MemorySize: 512
      Environment:
        Variables:
          BOTO3_BUCKET: !Ref BedrockBoto3Bucket
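Every entry in the expanded AllowedValues list maps onto a provider branch that llm.py, lambdahook.py, and settings.py handle, since the provider is the first dotted segment of the modelId. A quick sanity-check sketch (model list copied from the template above):

```python
ALLOWED_MODEL_IDS = [
    "amazon.titan-text-express-v1", "amazon.titan-text-lite-v1",
    "ai21.j2-ultra-v1", "ai21.j2-mid-v1",
    "anthropic.claude-v1", "anthropic.claude-instant-v1",
    "anthropic.claude-v2", "anthropic.claude-v2:1",
    "cohere.command-text-v14", "cohere.command-light-text-v14",
    "meta.llama2-13b-chat-v1", "meta.llama2-70b-chat-v1",
]

# Provider is modelId.split(".")[0], as in settings.py above.
HANDLED_PROVIDERS = {"amazon", "ai21", "anthropic", "cohere", "meta"}
assert all(m.split(".")[0] in HANDLED_PROVIDERS for m in ALLOWED_MODEL_IDS)
```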
