Skip to content

Commit

Permalink
Formatting fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
MarkDaoust committed Aug 30, 2023
1 parent 0ed880c commit 181c956
Show file tree
Hide file tree
Showing 3 changed files with 36 additions and 43 deletions.
18 changes: 9 additions & 9 deletions google/generativeai/discuss.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ def _make_message(content: discuss_types.MessageOptions) -> glm.Message:

def _make_messages(messages: discuss_types.MessagesOptions) -> List[glm.Message]:
"""
Creates a list of glm.Message objects from the provided messages.
Creates a list of `glm.Message` objects from the provided messages.
This function takes a variety of message content inputs, such as strings, dictionaries,
or `glm.Message` objects, and creates a list of `glm.Message` objects. It ensures that
Expand All @@ -52,7 +52,7 @@ def _make_messages(messages: discuss_types.MessagesOptions) -> List[glm.Message]
messages: The messages to convert.
Returns:
A list of `glm.Message` objects with alternating authors if needed.
A list of `glm.Message` objects with alternating authors.
"""
if isinstance(messages, (str, dict, glm.Message)):
messages = [_make_message(messages)]
Expand Down Expand Up @@ -110,14 +110,14 @@ def _make_examples_from_flat(
"""
Creates a list of `glm.Example` objects from a list of message options.
This function takes a list of message options and pairs them into `glm.Example` objects.
The input examples must be in pairs to create valid examples.
This function takes a list of `discuss_types.MessageOptions` and pairs them into
`glm.Example` objects. The input examples must be in pairs to create valid examples.
Args:
examples: The list of message options.
examples: The list of `discuss_types.MessageOptions`.
Returns:
A list of glm.Example objects created from the provided message pairs.
A list of `glm.Example` objects created by pairing up the provided messages.
Raises:
ValueError: If the provided list of examples is not of even length.
Expand Down Expand Up @@ -158,7 +158,7 @@ def _make_examples(examples: discuss_types.ExamplesOptions) -> List[glm.Example]
examples: The examples to convert.
Returns:
A list of glm.Example objects created from the provided examples.
A list of `glm.Example` objects created from the provided examples.
"""
if isinstance(examples, glm.Example):
return [examples]
Expand Down Expand Up @@ -262,7 +262,7 @@ def _make_message_prompt(
examples: discuss_types.ExamplesOptions | None = None,
messages: discuss_types.MessagesOptions | None = None,
) -> glm.MessagePrompt:
"""Creates a glm.MessagePrompt object from the provided prompt components."""
"""Creates a `glm.MessagePrompt` object from the provided prompt components."""
prompt = _make_message_prompt_dict(
prompt=prompt, context=context, examples=examples, messages=messages
)
Expand All @@ -281,7 +281,7 @@ def _make_generate_message_request(
top_k: float | None = None,
prompt: discuss_types.MessagePromptOptions | None = None,
) -> glm.GenerateMessageRequest:
"""Creates a glm.GenerateMessageRequest object for generating messages."""
"""Creates a `glm.GenerateMessageRequest` object for generating messages."""
model = model_types.make_model_name(model)

prompt = _make_message_prompt(
Expand Down
22 changes: 12 additions & 10 deletions google/generativeai/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,13 +42,13 @@ class ModelsIterable(model_types.ModelsIterable):
if necessary based on the provided `page_size` and `page_token`.
Args:
`page_size` (int): The number of `models` to fetch per page.
`page_token` (str | None): Token representing the current page. Pass None for the first page.
`models` (List[model_types.Model]): List of models to iterate through.
`client` (glm.ModelServiceClient | None): An optional client for model service.
page_size: The number of `models` to fetch per page.
page_token: Token representing the current page. Pass `None` for the first page.
models: List of models to iterate through.
client: An optional client for the model service.
Returns:
An `ModelsIterable` iterable object that allows iterating through the models.
A `ModelsIterable` iterable object that allows iterating through the models.
"""

def __init__(
Expand All @@ -64,7 +64,7 @@ def __init__(
self._models = models
self._client = client

def __iter__(self) -> Iterable[model_types.Model]:
def __iter__(self) -> Iterator[model_types.Model]:
"""
Returns an iterator over the models.
"""
Expand All @@ -84,7 +84,9 @@ def _next_page(self) -> ModelsIterable | None:
)


def _list_models(page_size, page_token, client) -> ModelsIterable:
def _list_models(
page_size: int, page_token: str | None, client: glm.ModelServiceClient
) -> ModelsIterable:
"""
Fetches a page of models using the provided client and pagination tokens.
Expand All @@ -93,9 +95,9 @@ def _list_models(page_size, page_token, client) -> ModelsIterable:
object to traverse through the models.
Args:
`page_size` (int): How many `types.Models` to fetch per page (api call).
`page_token`` (str): Token representing the current page.
`client` (`glm.ModelServiceClient`): The client to communicate with the model service.
page_size: How many `types.Models` to fetch per page (api call).
page_token: Token representing the current page.
client: The client to communicate with the model service.
Returns:
An iterable `ModelsIterable` object containing the fetched models and pagination info.
Expand Down
39 changes: 15 additions & 24 deletions google/generativeai/text.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,10 +30,10 @@

def _make_text_prompt(prompt: str | dict[str, str]) -> glm.TextPrompt:
"""
Creates a TextPrompt object based on the provided prompt input.
Creates a `glm.TextPrompt` object based on the provided prompt input.
Args:
prompt (str | dict[str, str]): The prompt input, either a string or a dictionary.
prompt: The prompt input, either a string or a dictionary.
Returns:
glm.TextPrompt: A TextPrompt object containing the prompt text.
Expand Down Expand Up @@ -69,25 +69,16 @@ def _make_generate_text_request(
used for generating text using the chosen model.
Args:
`model` (`model_types.ModelNameOptions`, optional):
The model to use for text generation.
`prompt` (str | None, optional):
The prompt for text generation. Defaults to None.
`temperature` (float | None, optional):
The temperature for randomness in generation. Defaults to None.
`candidate_count` (int | None, optional):
The number of candidates to consider. Defaults to None.
`max_output_tokens` (int | None, optional):
The maximum number of output tokens. Defaults to None.
`top_p` (float | None, optional):
The nucleus sampling probability threshold. Defaults to None.
`top_k` (int | None, optional):
The top-k sampling parameter. Defaults to None.
`safety_settings` (`safety_types.SafetySettingOptions` | None, optional):
Safety settings for generated text. Defaults to None.
`stop_sequences` (str | Iterable[str] | None, optional):
Stop sequences to halt text generation.
Can be a string or iterable of strings. Defaults to None.
model: The model to use for text generation.
prompt: The prompt for text generation. Defaults to None.
temperature: The temperature for randomness in generation. Defaults to None.
candidate_count: The number of candidates to consider. Defaults to None.
max_output_tokens: The maximum number of output tokens. Defaults to None.
top_p: The nucleus sampling probability threshold. Defaults to None.
top_k: The top-k sampling parameter. Defaults to None.
safety_settings: Safety settings for generated text. Defaults to None.
stop_sequences: Stop sequences to halt text generation. Can be a string
or iterable of strings. Defaults to None.
Returns:
`glm.GenerateTextRequest`: A `GenerateTextRequest` object configured with the specified parameters.
Expand Down Expand Up @@ -202,9 +193,9 @@ def _generate_response(
Generates a response using the provided `glm.GenerateTextRequest` and client.
Args:
`request` (`glm.GenerateTextRequest`): The text generation request.
`client` (`glm.TextServiceClient`, optional):
The client to use for text generation. Defaults to None, in which case the default text client is used.
request: The text generation request.
client: The client to use for text generation. Defaults to None, in which
case the default text client is used.
Returns:
`Completion`: A `Completion` object with the generated text and response information.
Expand Down

0 comments on commit 181c956

Please sign in to comment.