Skip to content

Commit

Permalink
Update docstrings to files.
Browse files Browse the repository at this point in the history
  • Loading branch information
ParagEkbote committed Jan 11, 2025
1 parent dda8266 commit 5d225b3
Show file tree
Hide file tree
Showing 2 changed files with 34 additions and 7 deletions.
32 changes: 29 additions & 3 deletions src/lighteval/models/endpoints/openai_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,18 +66,44 @@ class OpenAIModelConfig:
Configuration class to create an [[OpenAIModel]], to call via its API at inference for evaluation.
Attributes:
model: name or identifier of the OpenAI model to be used for inference.
model (str): name or identifier of the OpenAI model to be used for inference.
generation_parameters (GenerationParameters, optional): Parameters for model generation.
    If not provided, defaults to a new instance of `GenerationParameters`.
"""

model: str
generation_parameters: GenerationParameters = None

def __post_init__(self):
    """Ensure `generation_parameters` holds a usable `GenerationParameters`.

    Dataclass post-init hook: when the field was left unset (``None`` or any
    other falsy value), replace it with a freshly constructed default
    `GenerationParameters`; an already-provided instance is kept as-is.
    """
    # `or` keeps a truthy caller-supplied value and substitutes a default otherwise,
    # matching the original falsy check.
    self.generation_parameters = self.generation_parameters or GenerationParameters()

@classmethod
def from_path(cls, path: str) -> "OpenAIModelConfig":
"""
Creates an instance of `OpenAIModelConfig` from a YAML configuration file.
Loads the model configuration from a given file path and initializes the
`OpenAIModelConfig` with the model name and corresponding `GenerationParameters` parsed
from the file.
Args:
path (str): Path to the YAML configuration file containing the model configuration.
Returns:
OpenAIModelConfig: An instance of `OpenAIModelConfig` with the configuration loaded
from the specified YAML file.
Raises:
FileNotFoundError: If the specified file path does not exist.
KeyError: If required keys are missing in the YAML configuration file.
"""
import yaml

with open(path, "r") as f:
Expand Down Expand Up @@ -169,11 +195,11 @@ def greedy_until(
Generates responses using a greedy decoding strategy until certain ending conditions are met.
Args:
requests (list[Request]): list of requests containing the context and ending conditions.
requests (list[GreedyUntilRequest]): list of requests containing the context and ending conditions.
override_bs (int, optional): Override the batch size for generation. Defaults to None.
Returns:
list[GenerativeResponse]: list of generated responses.
list[GenerativeResponse]: list of generated responses.
"""
for request in requests:
request.tokenized_context = self.tok_encode(request.context)
Expand Down
9 changes: 5 additions & 4 deletions src/lighteval/models/transformers/adapter_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,10 @@ class AdapterModelConfig(BaseModelConfig):
"""
Manages the configuration of adapter models. Adapter models are designed to extend or adapt a
base model's functionality for specific tasks while keeping most of the base model's parameters frozen.
Attributes:
base_model (str): The name of the parent base model. This model provides the tokenizer and configuration for the adapter model.
Defaults to None if not specified.
"""

# Adapter models have the specificity that they look at the base model (= the parent) for the tokenizer and config
Expand All @@ -66,17 +70,14 @@ def init_configs(self, env_config: EnvConfig):
env_config (EnvConfig): An instance of EnvConfig.
Returns:
Any:
Any: Result of the configuration initialization.
"""
return self._init_configs(self.base_model, env_config)


class AdapterModel(BaseModel):
"""
Integrates the adapter models with a pre-trained base model.
Args:
"""

def _create_auto_tokenizer(self, config: AdapterModelConfig, env_config: EnvConfig) -> PreTrainedTokenizer:
Expand Down

0 comments on commit 5d225b3

Please sign in to comment.