Skip to content

Commit

Permalink
Add support for personas (#170)
Browse files Browse the repository at this point in the history
- Add support for `travel`, `cooking` and `fitness` personas
- Add testing for the personas
- Update documentation with usage of the personas
  • Loading branch information
vsakkas authored May 12, 2024
1 parent 4a275d3 commit 2bbf6b0
Show file tree
Hide file tree
Showing 5 changed files with 121 additions and 4 deletions.
22 changes: 21 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# <img src="https://raw.githubusercontent.com/vsakkas/sydney.py/master/images/logo.svg" width="28px" /> Sydney.py

[![Latest Release](https://img.shields.io/github/v/release/vsakkas/sydney.py.svg)](https://github.com/vsakkas/sydney.py/releases/tag/v0.20.6)
[![Latest Release](https://img.shields.io/github/v/release/vsakkas/sydney.py.svg)](https://github.com/vsakkas/sydney.py/releases/tag/v0.21.0)
[![Python](https://img.shields.io/badge/python-3.9+-blue.svg)](https://www.python.org/downloads/)
[![MIT License](https://img.shields.io/badge/license-MIT-blue)](https://github.com/vsakkas/sydney.py/blob/master/LICENSE)

Expand All @@ -17,6 +17,7 @@ Python Client for Copilot (formerly named Bing Chat), also known as Sydney.
- Stream response tokens for real-time communication.
- Retrieve citations and suggested user responses.
- Enhance your prompts with images for an enriched experience.
- Customize your experience using any of the supported personas.
- Use asyncio for efficient and non-blocking I/O operations.

## Requirements
Expand Down Expand Up @@ -203,6 +204,25 @@ Searching the web is enabled by default.
> [!NOTE]
> Web search cannot be disabled when the response is streamed.

### Personas

It is possible to use specialized versions of Copilot, suitable for specific tasks or conversations:

```python
async with SydneyClient(persona="travel") as sydney:
    response = await sydney.ask("Tourist attractions in Sydney")
    print(response)
```

The available options for the `persona` parameter are:
- `copilot`
- `travel`
- `cooking`
- `fitness`

By default, Sydney will use the `copilot` persona.

### Compose

You can ask Copilot to compose different types of content, such as emails, articles, ideas and more:
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[tool.poetry]
name = "sydney-py"
version = "0.20.6"
version = "0.21.0"
description = "Python Client for Copilot (formerly named Bing Chat), also known as Sydney."
authors = ["vsakkas <[email protected]>"]
license = "MIT"
Expand Down
25 changes: 25 additions & 0 deletions sydney/enums.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,16 @@ class NoSearchOptions(Enum):
NOSEARCHALL = "nosearchall"


class PersonaOptions(Enum):
    """
    Option-set flags appended to ask requests when a non-default GPT
    persona is used.

    Each value is the raw option-set string the Copilot API expects.
    There is deliberately no `COPILOT` member: the default persona
    requires no extra option set. Members are looked up by the upper-cased
    `GPTPersonaID` value, so member names must stay in sync with that enum.
    """

    # Vacation planner persona (selected via persona="travel").
    TRAVEL = "ai_persona_vacation_planner_with_examples"
    # Cooking assistant persona (selected via persona="cooking").
    COOKING = "ai_persona_cooking_assistant_w1shot"
    # Fitness trainer persona (selected via persona="fitness").
    FITNESS = "ai_persona_fitness_trainer_w1shot"


class DefaultComposeOptions(Enum):
"""
Options that are used in all compose API requests to Copilot.
Expand Down Expand Up @@ -160,6 +170,21 @@ class MessageType(Enum):
SEARCH_QUERY = "SearchQuery"


class GPTPersonaID(Enum):
    """
    Allowed IDs for different GPT personas. Supported options are:
    - `copilot` for using the default Copilot persona
    - `travel` for using the vacation planner persona
    - `cooking` for using the cooking assistant persona
    - `fitness` for using the fitness trainer persona

    Values are sent verbatim as the `gptId` and `personaId` fields of ask
    requests, and (upper-cased) double as lookup keys into `PersonaOptions`
    for every member except `COPILOT`.
    """

    COPILOT = "copilot"
    TRAVEL = "travel"
    COOKING = "cooking"
    FITNESS = "fitness"


class ResultValue(Enum):
"""
Copilot result values on raw responses. Supported options are:
Expand Down
16 changes: 15 additions & 1 deletion sydney/sydney.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,10 @@
CustomComposeTone,
DefaultComposeOptions,
DefaultOptions,
GPTPersonaID,
MessageType,
NoSearchOptions,
PersonaOptions,
ResultValue,
)
from sydney.exceptions import (
Expand All @@ -55,6 +57,7 @@ class SydneyClient:
def __init__(
self,
style: str = "balanced",
persona: str = "copilot",
bing_cookies: str | None = None,
use_proxy: bool = False,
) -> None:
Expand All @@ -66,6 +69,9 @@ def __init__(
style : str
The conversation style that Copilot will adopt. Must be one of the options listed
in the `ConversationStyle` enum. Default is "balanced".
persona : str
The GPT persona that Copilot will adopt. Must be one of the options listed in the
`GPTPersonaID` enum. Default is "copilot".
bing_cookies: str | None
The cookies from Bing required to connect and use Copilot. If not provided,
the `BING_COOKIES` environment variable is loaded instead. Default is None.
Expand All @@ -82,6 +88,7 @@ def __init__(
self.conversation_style_option_sets: ConversationStyleOptionSets = getattr(
ConversationStyleOptionSets, style.upper()
)
self.persona: GPTPersonaID = getattr(GPTPersonaID, persona.upper())
self.conversation_signature: str | None = None
self.encrypted_conversation_signature: str | None = None
self.conversation_id: str | None = None
Expand Down Expand Up @@ -142,6 +149,10 @@ def _build_ask_arguments(
if not search:
options_sets.extend(option.value for option in NoSearchOptions)

# Build option sets based on whether a non default GPT persona is used or not.
if self.persona != GPTPersonaID.COPILOT:
options_sets.append(PersonaOptions[self.persona.value.upper()].value)

image_url, original_image_url = None, None
if attachment_info:
image_url = BING_BLOB_URL + attachment_info["blobId"]
Expand All @@ -160,7 +171,7 @@ def _build_ask_arguments(
"conversationHistoryOptionsSets": [
option.value for option in ConversationHistoryOptionsSets
],
"gptId": "copilot",
"gptId": self.persona.value,
"isStartOfSession": self.invocation_id == 0,
"message": {
"author": "user",
Expand All @@ -175,6 +186,9 @@ def _build_ask_arguments(
"id": self.client_id,
},
"tone": str(self.conversation_style.value),
"extraExtensionParameters": {
"gpt-creator-persona": {"personaId": self.persona.value}
},
"spokenTextMode": "None",
"conversationId": self.conversation_id,
}
Expand Down
60 changes: 59 additions & 1 deletion tests/test_ask.py
Original file line number Diff line number Diff line change
Expand Up @@ -263,7 +263,65 @@ async def test_ask_logic_precise() -> bool:
score = 0
for expected_response in expected_responses:
score = fuzz.token_sort_ratio(response, expected_response)
if score >= 80:
if score >= 75:
return True

assert False, f"Unexpected response: {response}, match score: {score}"


@pytest.mark.asyncio
async def test_ask_travel_persona() -> None:
    """Greet Copilot with the `travel` persona and fuzzy-match the greeting.

    Copilot responses vary slightly between runs, so the reply is compared
    against a set of known greetings with token-sort fuzzy matching; a score
    of 75 or higher on any candidate counts as a pass.
    """
    expected_responses = [
        "Hello! This is Vacation Planner. How can I assist you with your vacation plans today? 😊",
        "Hello! This is Vacation Planner. How can I assist you with your vacation plans? 😊",
    ]

    async with SydneyClient(persona="travel") as sydney:
        response = await sydney.ask("Hello, Copilot!")

    # Track the best score so a failure reports the closest match,
    # not just the score of the last candidate tried.
    best_score = 0
    for expected_response in expected_responses:
        score = fuzz.token_sort_ratio(response, expected_response)
        best_score = max(best_score, score)
        if score >= 75:
            return

    pytest.fail(f"Unexpected response: {response}, best match score: {best_score}")


# NOTE(review): name says "travel" but this exercises the cooking persona —
# consider renaming to test_ask_cooking_persona.
@pytest.mark.asyncio
async def test_ask_travel_cooking() -> None:
    """Greet Copilot with the `cooking` persona and fuzzy-match the greeting.

    Copilot responses vary slightly between runs, so the reply is compared
    against a set of known greetings with token-sort fuzzy matching; a score
    of 75 or higher on any candidate counts as a pass.
    """
    expected_responses = [
        "Hello! This is Cooking Assistant. How can I assist you today? 😊",
        "Hello! This is Cooking Assistant. How can I assist you in the kitchen today? 😊",
    ]

    async with SydneyClient(persona="cooking") as sydney:
        response = await sydney.ask("Hello, Copilot!")

    # Track the best score so a failure reports the closest match,
    # not just the score of the last candidate tried.
    best_score = 0
    for expected_response in expected_responses:
        score = fuzz.token_sort_ratio(response, expected_response)
        best_score = max(best_score, score)
        if score >= 75:
            return

    pytest.fail(f"Unexpected response: {response}, best match score: {best_score}")


# NOTE(review): name says "travel" but this exercises the fitness persona —
# consider renaming to test_ask_fitness_persona.
@pytest.mark.asyncio
async def test_ask_travel_fitness() -> None:
    """Greet Copilot with the `fitness` persona and fuzzy-match the greeting.

    Copilot responses vary slightly between runs, so the reply is compared
    against a set of known greetings with token-sort fuzzy matching; a score
    of 75 or higher on any candidate counts as a pass.
    """
    expected_responses = [
        "Hello! How can I assist you with your fitness journey today? 😊",
        "Hello! This is Fitness Trainer. How can I assist you today? 😊",
        "Hello! This is Fitness Trainer. How can I assist you with your fitness journey today? 💪",
    ]

    async with SydneyClient(persona="fitness") as sydney:
        response = await sydney.ask("Hello, Copilot!")

    # Track the best score so a failure reports the closest match,
    # not just the score of the last candidate tried.
    best_score = 0
    for expected_response in expected_responses:
        score = fuzz.token_sort_ratio(response, expected_response)
        best_score = max(best_score, score)
        if score >= 75:
            return

    pytest.fail(f"Unexpected response: {response}, best match score: {best_score}")

0 comments on commit 2bbf6b0

Please sign in to comment.