diff --git a/src/lighteval/main_accelerate.py b/src/lighteval/main_accelerate.py
index e962d5384..9069b7eb6 100644
--- a/src/lighteval/main_accelerate.py
+++ b/src/lighteval/main_accelerate.py
@@ -33,10 +33,10 @@
 TOKEN = os.getenv("HF_TOKEN")
 CACHE_DIR: str = os.getenv("HF_HOME", "/scratch")
 
-HELP_PANNEL_NAME_1 = "Common Paramaters"
-HELP_PANNEL_NAME_2 = "Logging Parameters"
-HELP_PANNEL_NAME_3 = "Debug Paramaters"
-HELP_PANNEL_NAME_4 = "Modeling Paramaters"
+HELP_PANEL_NAME_1 = "Common Parameters"
+HELP_PANEL_NAME_2 = "Logging Parameters"
+HELP_PANEL_NAME_3 = "Debug Parameters"
+HELP_PANEL_NAME_4 = "Modeling Parameters"
 
 
 def accelerate(  # noqa C901
@@ -50,51 +50,51 @@ def accelerate(  # noqa C901
     tasks: Annotated[str, Argument(help="Comma-separated list of tasks to evaluate on.")],
     # === Common parameters ===
     use_chat_template: Annotated[
-        bool, Option(help="Use chat template for evaluation.", rich_help_panel=HELP_PANNEL_NAME_4)
+        bool, Option(help="Use chat template for evaluation.", rich_help_panel=HELP_PANEL_NAME_4)
     ] = False,
     system_prompt: Annotated[
-        Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANNEL_NAME_4)
+        Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANEL_NAME_4)
     ] = None,
     dataset_loading_processes: Annotated[
-        int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANNEL_NAME_1)
+        int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = 1,
     custom_tasks: Annotated[
-        Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANNEL_NAME_1)
+        Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = None,
     cache_dir: Annotated[
-        Optional[str], Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANNEL_NAME_1)
+        Optional[str], Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = None,
     num_fewshot_seeds: Annotated[
-        int, Option(help="Number of seeds to use for few-shot evaluation.", rich_help_panel=HELP_PANNEL_NAME_1)
+        int, Option(help="Number of seeds to use for few-shot evaluation.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = 1,
     # === saving ===
     output_dir: Annotated[
-        str, Option(help="Output directory for evaluation results.", rich_help_panel=HELP_PANNEL_NAME_2)
+        str, Option(help="Output directory for evaluation results.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = "results",
     push_to_hub: Annotated[
-        bool, Option(help="Push results to the huggingface hub.", rich_help_panel=HELP_PANNEL_NAME_2)
+        bool, Option(help="Push results to the huggingface hub.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = False,
     push_to_tensorboard: Annotated[
-        bool, Option(help="Push results to tensorboard.", rich_help_panel=HELP_PANNEL_NAME_2)
+        bool, Option(help="Push results to tensorboard.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = False,
     public_run: Annotated[
-        bool, Option(help="Push results and details to a public repo.", rich_help_panel=HELP_PANNEL_NAME_2)
+        bool, Option(help="Push results and details to a public repo.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = False,
     results_org: Annotated[
-        Optional[str], Option(help="Organization to push results to.", rich_help_panel=HELP_PANNEL_NAME_2)
+        Optional[str], Option(help="Organization to push results to.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = None,
     save_details: Annotated[
-        bool, Option(help="Save detailed, sample per sample, results.", rich_help_panel=HELP_PANNEL_NAME_2)
+        bool, Option(help="Save detailed, sample per sample, results.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = False,
     # === debug ===
     max_samples: Annotated[
-        Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANNEL_NAME_3)
+        Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANEL_NAME_3)
     ] = None,
     override_batch_size: Annotated[
-        int, Option(help="Override batch size for evaluation.", rich_help_panel=HELP_PANNEL_NAME_3)
+        int, Option(help="Override batch size for evaluation.", rich_help_panel=HELP_PANEL_NAME_3)
     ] = -1,
     job_id: Annotated[
-        int, Option(help="Optional job id for future refenrence.", rich_help_panel=HELP_PANNEL_NAME_3)
+        int, Option(help="Optional job id for future reference.", rich_help_panel=HELP_PANEL_NAME_3)
     ] = 0,
 ):
     """
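The renamed constants are only ever passed as `rich_help_panel` labels, so the fix is purely cosmetic and shows up only in `--help` output. A minimal sketch of the mechanism, assuming the CLI is built on Typer (consistent with the `Option`/`Argument` annotations used throughout these files):

```python
# Sketch only: demonstrates how rich_help_panel groups options under a named
# panel in --help output. Typer and the demo command are assumptions here,
# not part of the diff.
from typing import Optional

import typer
from typing_extensions import Annotated

HELP_PANEL_NAME_1 = "Common Parameters"

app = typer.Typer()


@app.command()
def demo(
    cache_dir: Annotated[
        Optional[str], typer.Option(help="Cache directory.", rich_help_panel=HELP_PANEL_NAME_1)
    ] = None,
):
    """Toy command: --help renders cache-dir under the "Common Parameters" panel."""
    print(cache_dir)


if __name__ == "__main__":
    app()
```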
diff --git a/src/lighteval/main_baseline.py b/src/lighteval/main_baseline.py
index dd4786679..2dd970ea8 100644
--- a/src/lighteval/main_baseline.py
+++ b/src/lighteval/main_baseline.py
@@ -30,28 +30,28 @@
 CACHE_DIR: str = os.getenv("HF_HOME", "/scratch")
 
-HELP_PANNEL_NAME_1 = "Common Paramaters"
-HELP_PANNEL_NAME_2 = "Logging Parameters"
-HELP_PANNEL_NAME_3 = "Debug Paramaters"
-HELP_PANNEL_NAME_4 = "Modeling Paramaters"
+HELP_PANEL_NAME_1 = "Common Parameters"
+HELP_PANEL_NAME_2 = "Logging Parameters"
+HELP_PANEL_NAME_3 = "Debug Parameters"
+HELP_PANEL_NAME_4 = "Modeling Parameters"
 
 
 def baseline(
     tasks: Annotated[str, Argument(help="Comma-separated list of tasks to evaluate on.")],
     cache_dir: Annotated[
-        str, Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANNEL_NAME_1)
+        str, Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = CACHE_DIR,
     custom_tasks: Annotated[
-        Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANNEL_NAME_1)
+        Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = None,
     dataset_loading_processes: Annotated[
-        int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANNEL_NAME_1)
+        int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = 1,
     output_dir: Annotated[
-        str, Option(help="Output directory for evaluation results.", rich_help_panel=HELP_PANNEL_NAME_2)
+        str, Option(help="Output directory for evaluation results.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = "results",
     max_samples: Annotated[
-        Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANNEL_NAME_3)
+        Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANEL_NAME_3)
     ] = None,
 ):
     """
diff --git a/src/lighteval/main_endpoint.py b/src/lighteval/main_endpoint.py
index a9c23f481..ccfccd0aa 100644
--- a/src/lighteval/main_endpoint.py
+++ b/src/lighteval/main_endpoint.py
@@ -34,10 +34,10 @@
 TOKEN = os.getenv("HF_TOKEN")
 CACHE_DIR: str = os.getenv("HF_HOME", "/scratch")
 
-HELP_PANNEL_NAME_1 = "Common Paramaters"
-HELP_PANNEL_NAME_2 = "Logging Parameters"
-HELP_PANNEL_NAME_3 = "Debug Paramaters"
-HELP_PANNEL_NAME_4 = "Modeling Paramaters"
+HELP_PANEL_NAME_1 = "Common Parameters"
+HELP_PANEL_NAME_2 = "Logging Parameters"
+HELP_PANEL_NAME_3 = "Debug Parameters"
+HELP_PANEL_NAME_4 = "Modeling Parameters"
 
 
 @app.command(rich_help_panel="Evaluation Backends")
@@ -52,52 +52,51 @@ def openai(
     tasks: Annotated[str, Argument(help="Comma-separated list of tasks to evaluate on.")],
     # === Common parameters ===
     system_prompt: Annotated[
-        Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANNEL_NAME_4)
+        Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANEL_NAME_4)
     ] = None,
     dataset_loading_processes: Annotated[
-        int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANNEL_NAME_1)
+        int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = 1,
     custom_tasks: Annotated[
-        Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANNEL_NAME_1)
+        Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = None,
     cache_dir: Annotated[
-        str, Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANNEL_NAME_1)
+        str, Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = CACHE_DIR,
     num_fewshot_seeds: Annotated[
-        int, Option(help="Number of seeds to use for few-shot evaluation.", rich_help_panel=HELP_PANNEL_NAME_1)
+        int, Option(help="Number of seeds to use for few-shot evaluation.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = 1,
     # === saving ===
     output_dir: Annotated[
-        str, Option(help="Output directory for evaluation results.", rich_help_panel=HELP_PANNEL_NAME_2)
+        str, Option(help="Output directory for evaluation results.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = "results",
     push_to_hub: Annotated[
-        bool, Option(help="Push results to the huggingface hub.", rich_help_panel=HELP_PANNEL_NAME_2)
+        bool, Option(help="Push results to the huggingface hub.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = False,
     push_to_tensorboard: Annotated[
-        bool, Option(help="Push results to tensorboard.", rich_help_panel=HELP_PANNEL_NAME_2)
+        bool, Option(help="Push results to tensorboard.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = False,
     public_run: Annotated[
-        bool, Option(help="Push results and details to a public repo.", rich_help_panel=HELP_PANNEL_NAME_2)
+        bool, Option(help="Push results and details to a public repo.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = False,
     results_org: Annotated[
-        Optional[str], Option(help="Organization to push results to.", rich_help_panel=HELP_PANNEL_NAME_2)
+        Optional[str], Option(help="Organization to push results to.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = None,
     save_details: Annotated[
-        bool, Option(help="Save detailed, sample per sample, results.", rich_help_panel=HELP_PANNEL_NAME_2)
+        bool, Option(help="Save detailed, sample per sample, results.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = False,
     # === debug ===
     max_samples: Annotated[
-        Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANNEL_NAME_3)
+        Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANEL_NAME_3)
     ] = None,
     job_id: Annotated[
-        int, Option(help="Optional job id for future refenrence.", rich_help_panel=HELP_PANNEL_NAME_3)
+        int, Option(help="Optional job id for future reference.", rich_help_panel=HELP_PANEL_NAME_3)
     ] = 0,
 ):
     """
     Evaluate OPENAI models.
     """
     from lighteval.logging.evaluation_tracker import EvaluationTracker
-
     from lighteval.models.endpoints.openai_model import OpenAIModelConfig
     from lighteval.models.model_input import GenerationParameters
     from lighteval.pipeline import EnvConfig, ParallelismManager, Pipeline, PipelineParameters
@@ -161,51 +160,51 @@ def inference_endpoint(
     tasks: Annotated[str, Argument(help="Comma-separated list of tasks to evaluate on.")],
     # === Common parameters ===
     use_chat_template: Annotated[
-        bool, Option(help="Use chat template for evaluation.", rich_help_panel=HELP_PANNEL_NAME_4)
+        bool, Option(help="Use chat template for evaluation.", rich_help_panel=HELP_PANEL_NAME_4)
     ] = False,
     system_prompt: Annotated[
-        Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANNEL_NAME_4)
+        Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANEL_NAME_4)
     ] = None,
     dataset_loading_processes: Annotated[
-        int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANNEL_NAME_1)
+        int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = 1,
     custom_tasks: Annotated[
-        Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANNEL_NAME_1)
+        Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = None,
     cache_dir: Annotated[
-        str, Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANNEL_NAME_1)
+        str, Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = CACHE_DIR,
     num_fewshot_seeds: Annotated[
-        int, Option(help="Number of seeds to use for few-shot evaluation.", rich_help_panel=HELP_PANNEL_NAME_1)
+        int, Option(help="Number of seeds to use for few-shot evaluation.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = 1,
     # === saving ===
     output_dir: Annotated[
-        str, Option(help="Output directory for evaluation results.", rich_help_panel=HELP_PANNEL_NAME_2)
+        str, Option(help="Output directory for evaluation results.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = "results",
     push_to_hub: Annotated[
-        bool, Option(help="Push results to the huggingface hub.", rich_help_panel=HELP_PANNEL_NAME_2)
+        bool, Option(help="Push results to the huggingface hub.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = False,
     push_to_tensorboard: Annotated[
-        bool, Option(help="Push results to tensorboard.", rich_help_panel=HELP_PANNEL_NAME_2)
+        bool, Option(help="Push results to tensorboard.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = False,
     public_run: Annotated[
-        bool, Option(help="Push results and details to a public repo.", rich_help_panel=HELP_PANNEL_NAME_2)
+        bool, Option(help="Push results and details to a public repo.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = False,
     results_org: Annotated[
-        Optional[str], Option(help="Organization to push results to.", rich_help_panel=HELP_PANNEL_NAME_2)
+        Optional[str], Option(help="Organization to push results to.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = None,
     save_details: Annotated[
-        bool, Option(help="Save detailed, sample per sample, results.", rich_help_panel=HELP_PANNEL_NAME_2)
+        bool, Option(help="Save detailed, sample per sample, results.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = False,
     # === debug ===
     max_samples: Annotated[
-        Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANNEL_NAME_3)
+        Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANEL_NAME_3)
     ] = None,
     override_batch_size: Annotated[
-        int, Option(help="Override batch size for evaluation.", rich_help_panel=HELP_PANNEL_NAME_3)
+        int, Option(help="Override batch size for evaluation.", rich_help_panel=HELP_PANEL_NAME_3)
     ] = None,
     job_id: Annotated[
-        int, Option(help="Optional job id for future refenrence.", rich_help_panel=HELP_PANNEL_NAME_3)
+        int, Option(help="Optional job id for future reference.", rich_help_panel=HELP_PANEL_NAME_3)
     ] = 0,
 ):
     """
@@ -276,51 +275,51 @@ def tgi(
     tasks: Annotated[str, Argument(help="Comma-separated list of tasks to evaluate on.")],
     # === Common parameters ===
     use_chat_template: Annotated[
-        bool, Option(help="Use chat template for evaluation.", rich_help_panel=HELP_PANNEL_NAME_4)
+        bool, Option(help="Use chat template for evaluation.", rich_help_panel=HELP_PANEL_NAME_4)
     ] = False,
     system_prompt: Annotated[
-        Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANNEL_NAME_4)
+        Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANEL_NAME_4)
     ] = None,
     dataset_loading_processes: Annotated[
-        int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANNEL_NAME_1)
+        int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = 1,
     custom_tasks: Annotated[
-        Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANNEL_NAME_1)
+        Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = None,
     cache_dir: Annotated[
-        str, Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANNEL_NAME_1)
+        str, Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = CACHE_DIR,
     num_fewshot_seeds: Annotated[
-        int, Option(help="Number of seeds to use for few-shot evaluation.", rich_help_panel=HELP_PANNEL_NAME_1)
+        int, Option(help="Number of seeds to use for few-shot evaluation.", rich_help_panel=HELP_PANEL_NAME_1)
     ] = 1,
     # === saving ===
     output_dir: Annotated[
-        str, Option(help="Output directory for evaluation results.", rich_help_panel=HELP_PANNEL_NAME_2)
+        str, Option(help="Output directory for evaluation results.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = "results",
     push_to_hub: Annotated[
-        bool, Option(help="Push results to the huggingface hub.", rich_help_panel=HELP_PANNEL_NAME_2)
+        bool, Option(help="Push results to the huggingface hub.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = False,
     push_to_tensorboard: Annotated[
-        bool, Option(help="Push results to tensorboard.", rich_help_panel=HELP_PANNEL_NAME_2)
+        bool, Option(help="Push results to tensorboard.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = False,
     public_run: Annotated[
-        bool, Option(help="Push results and details to a public repo.", rich_help_panel=HELP_PANNEL_NAME_2)
+        bool, Option(help="Push results and details to a public repo.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = False,
     results_org: Annotated[
-        Optional[str], Option(help="Organization to push results to.", rich_help_panel=HELP_PANNEL_NAME_2)
+        Optional[str], Option(help="Organization to push results to.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = None,
     save_details: Annotated[
-        bool, Option(help="Save detailed, sample per sample, results.", rich_help_panel=HELP_PANNEL_NAME_2)
+        bool, Option(help="Save detailed, sample per sample, results.", rich_help_panel=HELP_PANEL_NAME_2)
     ] = False,
     # === debug ===
     max_samples: Annotated[
-        Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANNEL_NAME_3)
+        Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANEL_NAME_3)
     ] = None,
     override_batch_size: Annotated[
-        int, Option(help="Override batch size for evaluation.", rich_help_panel=HELP_PANNEL_NAME_3)
+        int, Option(help="Override batch size for evaluation.", rich_help_panel=HELP_PANEL_NAME_3)
     ] = -1,
     job_id: Annotated[
-        int, Option(help="Optional job id for future refenrence.", rich_help_panel=HELP_PANNEL_NAME_3)
+        int, Option(help="Optional job id for future reference.", rich_help_panel=HELP_PANEL_NAME_3)
     ] = 0,
 ):
     """
@@ -328,7 +327,6 @@ def tgi(
     """
     from lighteval.logging.evaluation_tracker import EvaluationTracker
     from lighteval.models.endpoints.tgi_model import TGIModelConfig
-    from lighteval.models.model_input import GenerationParameters
     from lighteval.pipeline import EnvConfig, ParallelismManager, Pipeline, PipelineParameters
 
     env_config = EnvConfig(token=TOKEN, cache_dir=cache_dir)
@@ -343,17 +341,8 @@ def tgi(
     # TODO (nathan): better handling of model_args
     parallelism_manager = ParallelismManager.TGI
 
-    with open(model_config_path, "r") as f:
-        config = yaml.safe_load(f)["model"]
-    generation_parameters = GenerationParameters.from_dict(config)
-
-    model_config = TGIModelConfig(
-        inference_server_address=config["instance"]["inference_server_address"],
-        inference_server_auth=config["instance"]["inference_server_auth"],
-        model_id=config["instance"]["model_id"],
-        generation_parameters=generation_parameters,
-    )
+    model_config = TGIModelConfig.from_path(model_config_path)
 
     pipeline_params = PipelineParameters(
         launcher_type=parallelism_manager,
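With this hunk, `tgi()` no longer parses its YAML config inline; the loading moves behind `TGIModelConfig.from_path` (added in `tgi_model.py` below). A usage sketch of the new call site; the example path is the one exercised by the new test, and any file with the same layout should work:

```python
# Sketch of the refactored loading path. Assumes it is run from the
# repository root so the relative example path resolves.
from lighteval.models.endpoints.tgi_model import TGIModelConfig

model_config = TGIModelConfig.from_path("examples/model_configs/tgi_model.yaml")
print(model_config.inference_server_address, model_config.model_id)
```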
Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANNEL_NAME_3) + Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANEL_NAME_3) ] = None, override_batch_size: Annotated[ - int, Option(help="Override batch size for evaluation.", rich_help_panel=HELP_PANNEL_NAME_3) + int, Option(help="Override batch size for evaluation.", rich_help_panel=HELP_PANEL_NAME_3) ] = -1, job_id: Annotated[ - int, Option(help="Optional job id for future refenrence.", rich_help_panel=HELP_PANNEL_NAME_3) + int, Option(help="Optional job id for future reference.", rich_help_panel=HELP_PANEL_NAME_3) ] = 0, ): """ @@ -328,7 +327,6 @@ def tgi( """ from lighteval.logging.evaluation_tracker import EvaluationTracker from lighteval.models.endpoints.tgi_model import TGIModelConfig - from lighteval.models.model_input import GenerationParameters from lighteval.pipeline import EnvConfig, ParallelismManager, Pipeline, PipelineParameters env_config = EnvConfig(token=TOKEN, cache_dir=cache_dir) @@ -343,17 +341,8 @@ def tgi( # TODO (nathan): better handling of model_args parallelism_manager = ParallelismManager.TGI - with open(model_config_path, "r") as f: - config = yaml.safe_load(f)["model"] - generation_parameters = GenerationParameters.from_dict(config) - - model_config = TGIModelConfig( - inference_server_address=config["instance"]["inference_server_address"], - inference_server_auth=config["instance"]["inference_server_auth"], - model_id=config["instance"]["model_id"], - generation_parameters=generation_parameters, - ) + model_config = TGIModelConfig.from_path(model_config_path) pipeline_params = PipelineParameters( launcher_type=parallelism_manager, diff --git a/src/lighteval/main_nanotron.py b/src/lighteval/main_nanotron.py index 66826122e..94004c065 100644 --- a/src/lighteval/main_nanotron.py +++ b/src/lighteval/main_nanotron.py @@ -29,10 +29,10 @@ CACHE_DIR: str = os.getenv("HF_HOME", "/scratch") -HELP_PANNEL_NAME_1 = "Common Paramaters" -HELP_PANNEL_NAME_2 = "Logging Parameters" -HELP_PANNEL_NAME_3 = "Debug Paramaters" -HELP_PANNEL_NAME_4 = "Modeling Paramaters" +HELP_PANEL_NAME_1 = "Common Parameters" +HELP_PANEL_NAME_2 = "Logging Parameters" +HELP_PANEL_NAME_3 = "Debug Parameters" +HELP_PANEL_NAME_4 = "Modeling Parameters" SEED = 1234 diff --git a/src/lighteval/main_vllm.py b/src/lighteval/main_vllm.py index b1742b117..89311b5ae 100644 --- a/src/lighteval/main_vllm.py +++ b/src/lighteval/main_vllm.py @@ -29,10 +29,10 @@ TOKEN = os.getenv("HF_TOKEN") CACHE_DIR: str = os.getenv("HF_HOME", "/scratch") -HELP_PANNEL_NAME_1 = "Common Paramaters" -HELP_PANNEL_NAME_2 = "Logging Parameters" -HELP_PANNEL_NAME_3 = "Debug Paramaters" -HELP_PANNEL_NAME_4 = "Modeling Paramaters" +HELP_PANEL_NAME_1 = "Common Parameters" +HELP_PANEL_NAME_2 = "Logging Parameters" +HELP_PANEL_NAME_3 = "Debug Parameters" +HELP_PANEL_NAME_4 = "Modeling Parameters" def vllm( @@ -46,48 +46,48 @@ def vllm( tasks: Annotated[str, Argument(help="Comma-separated list of tasks to evaluate on.")], # === Common parameters === use_chat_template: Annotated[ - bool, Option(help="Use chat template for evaluation.", rich_help_panel=HELP_PANNEL_NAME_4) + bool, Option(help="Use chat template for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) ] = False, system_prompt: Annotated[ - Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANNEL_NAME_4) + Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) ] = None, 
diff --git a/src/lighteval/models/endpoints/tgi_model.py b/src/lighteval/models/endpoints/tgi_model.py
index 58742e2f7..9ca5dc053 100644
--- a/src/lighteval/models/endpoints/tgi_model.py
+++ b/src/lighteval/models/endpoints/tgi_model.py
@@ -57,6 +57,22 @@ def __post_init__(self):
         if not self.generation_parameters:
             self.generation_parameters = GenerationParameters()
 
+    @classmethod
+    def from_path(cls, path: str) -> "TGIModelConfig":
+        """Load configuration for TGI endpoint model from YAML file path.
+
+        Args:
+            path (`str`): Path of the model configuration YAML file.
+
+        Returns:
+            [`TGIModelConfig`]: Configuration for TGI endpoint model.
+        """
+        import yaml
+
+        with open(path, "r") as f:
+            config = yaml.safe_load(f)["model"]
+        return cls(**config["instance"], generation_parameters=GenerationParameters.from_dict(config))
+
 
 # inherit from InferenceEndpointModel instead of LightevalModel since they both use the same interface, and only overwrite
 # the client functions, since they use a different client.
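The new classmethod centralizes the parsing that `main_endpoint.py` previously did inline. A sketch of the YAML layout it appears to expect, inferred from the parsing logic above rather than copied from the repo's example file: a top-level `model` key whose `instance` mapping holds the `TGIModelConfig` fields, with the remaining keys fed to `GenerationParameters.from_dict`.

```python
# Round-trip sketch: building the config by hand the same way from_path does.
# The inference_server_address value is illustrative only.
import yaml

from lighteval.models.endpoints.tgi_model import TGIModelConfig
from lighteval.models.model_input import GenerationParameters

raw = yaml.safe_load(
    """
model:
  instance:
    inference_server_address: "http://localhost:8080"
    inference_server_auth: null
    model_id: null
"""
)["model"]

# Equivalent to TGIModelConfig.from_path on a file with this content:
config = TGIModelConfig(**raw["instance"], generation_parameters=GenerationParameters.from_dict(raw))
```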
+ """ import yaml with open(path, "r") as f: diff --git a/src/lighteval/models/endpoints/tgi_model.py b/src/lighteval/models/endpoints/tgi_model.py index 58742e2f7..9ca5dc053 100644 --- a/src/lighteval/models/endpoints/tgi_model.py +++ b/src/lighteval/models/endpoints/tgi_model.py @@ -57,6 +57,22 @@ def __post_init__(self): if not self.generation_parameters: self.generation_parameters = GenerationParameters() + @classmethod + def from_path(cls, path: str) -> "TGIModelConfig": + """Load configuration for TGI endpoint model from YAML file path. + + Args: + path (`str`): Path of the model configuration YAML file. + + Returns: + [`TGIModelConfig`]: Configuration for TGI endpoint model. + """ + import yaml + + with open(path, "r") as f: + config = yaml.safe_load(f)["model"] + return cls(**config["instance"], generation_parameters=GenerationParameters.from_dict(config)) + # inherit from InferenceEndpointModel instead of LightevalModel since they both use the same interface, and only overwrite # the client functions, since they use a different client. diff --git a/tests/models/test_endpoint_model.py b/tests/models/endpoints/test_endpoint_model.py similarity index 100% rename from tests/models/test_endpoint_model.py rename to tests/models/endpoints/test_endpoint_model.py diff --git a/tests/models/endpoints/test_tgi_model.py b/tests/models/endpoints/test_tgi_model.py new file mode 100644 index 000000000..305034278 --- /dev/null +++ b/tests/models/endpoints/test_tgi_model.py @@ -0,0 +1,42 @@ +# MIT License + +# Copyright (c) 2024 The HuggingFace Team + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import asdict + +import pytest + +from lighteval.models.endpoints.tgi_model import TGIModelConfig + + +class TestTGIModelConfig: + @pytest.mark.parametrize( + "config_path, expected_config", + [ + ( + "examples/model_configs/tgi_model.yaml", + {"inference_server_address": "", "inference_server_auth": None, "model_id": None}, + ), + ], + ) + def test_from_path(self, config_path, expected_config): + config = TGIModelConfig.from_path(config_path) + assert asdict(config) == expected_config