diff --git a/src/lighteval/models/litellm_model.py b/src/lighteval/models/litellm_model.py
index 9e29f569d..195221aa5 100644
--- a/src/lighteval/models/litellm_model.py
+++ b/src/lighteval/models/litellm_model.py
@@ -91,7 +91,7 @@ def __init__(self, config, env_config) -> None:
         self._tokenizer = encode
         self.pairwise_tokenization = False
         litellm.drop_params = True
-        litellm.verbose = True
+        litellm.set_verbose = False
 
     def _prepare_stop_sequence(self, stop_sequence):
         """Prepare and validate stop sequence."""
@@ -130,13 +130,16 @@ def __call_api(self, prompt, return_logits, max_new_tokens, num_samples, stop_se
         kwargs = {
             "model": self.model,
             "messages": prompt,
             "max_completion_tokens": max_new_tokens,
             "logprobs": return_logits if self.provider == "openai" else None,
-            "stop": stop_sequence,
             "base_url": self.base_url,
             "n": num_samples,
-            "temperature": self.TEMPERATURE,
-            "top_p": self.TOP_P,
             "caching": True,
         }
+        if "o1" in self.model:
+            logger.warning("O1 models do not support temperature, top_p, or stop sequences; disabling them.")
+        else:
+            kwargs["temperature"] = self.TEMPERATURE
+            kwargs["top_p"] = self.TOP_P
+            kwargs["stop"] = stop_sequence
         response = litellm.completion(**kwargs)
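
For reviewers who want to sanity-check the gating in isolation, here is a minimal standalone sketch of the same logic. `build_completion_kwargs`, the `TEMPERATURE`/`TOP_P` values, and the example model names are hypothetical, not taken from the repository; only the `"o1" in model` check and the keys it attaches mirror the diff. Note that with `litellm.drop_params = True` (set in `__init__` above), any parameter a provider still rejects is dropped by litellm rather than raising an error.

```python
import logging

logger = logging.getLogger(__name__)

# Hypothetical defaults; the class constants TEMPERATURE/TOP_P are not shown in the diff.
TEMPERATURE = 0.3
TOP_P = 0.95


def build_completion_kwargs(model, prompt, max_new_tokens, num_samples, stop_sequence):
    """Mirror the diff's gating: skip sampling params for o1-family models."""
    kwargs = {
        "model": model,
        "messages": prompt,
        "max_completion_tokens": max_new_tokens,
        "n": num_samples,
        "caching": True,
    }
    if "o1" in model:
        # o1 models reject temperature/top_p/stop, so these keys are never attached.
        logger.warning("O1 models do not support temperature, top_p, or stop sequences; disabling them.")
    else:
        kwargs["temperature"] = TEMPERATURE
        kwargs["top_p"] = TOP_P
        kwargs["stop"] = stop_sequence
    return kwargs


if __name__ == "__main__":
    messages = [{"role": "user", "content": "Say hi."}]
    for name in ("gpt-4o-mini", "o1-mini"):
        kwargs = build_completion_kwargs(name, messages, max_new_tokens=32, num_samples=1, stop_sequence=["\n\n"])
        print(name, "->", sorted(kwargs))  # the o1-mini variant omits temperature/top_p/stop
```

One review note on the design: the substring check `"o1" in self.model` matches "o1", "o1-mini", and "o1-preview", but it would also match any provider-prefixed model id that happens to contain "o1" elsewhere in the string, so a prefix or exact-name check may be safer if that ever becomes an issue.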