
Commit

Fixed issue with o1 in litellm. (#493)
JoelNiklaus authored Jan 20, 2025
1 parent 90d44c1 commit 3b89734
Showing 1 changed file with 7 additions and 4 deletions.

src/lighteval/models/litellm_model.py
@@ -91,7 +91,7 @@ def __init__(self, config, env_config) -> None:
         self._tokenizer = encode
         self.pairwise_tokenization = False
         litellm.drop_params = True
-        litellm.verbose = True
+        litellm.set_verbose = False
 
     def _prepare_stop_sequence(self, stop_sequence):
         """Prepare and validate stop sequence."""
@@ -130,13 +130,16 @@ def __call_api(self, prompt, return_logits, max_new_tokens, num_samples, stop_sequence):
             "messages": prompt,
             "max_completion_tokens": max_new_tokens,
             "logprobs": return_logits if self.provider == "openai" else None,
-            "stop": stop_sequence,
             "base_url": self.base_url,
             "n": num_samples,
-            "temperature": self.TEMPERATURE,
-            "top_p": self.TOP_P,
             "caching": True,
         }
+        if "o1" in self.model:
+            logger.warning("O1 models do not support temperature, top_p, stop sequence. Disabling.")
+        else:
+            kwargs["temperature"] = self.TEMPERATURE
+            kwargs["top_p"] = self.TOP_P
+            kwargs["stop"] = stop_sequence
 
         response = litellm.completion(**kwargs)
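
The net effect of the second hunk is sketched below as a standalone function: instead of always putting temperature, top_p, and stop in the request, the patch only attaches them for non-o1 models, since o1 endpoints reject those parameters. This is a minimal illustration, not lighteval's actual code: the helper name build_completion_kwargs and its flat signature are invented here (in the real file this logic lives inside __call_api and reads attributes such as self.model and self.TEMPERATURE), and the "model" key is assumed from context above the lines shown in the diff.

    import logging

    import litellm

    logger = logging.getLogger(__name__)

    def build_completion_kwargs(model, prompt, max_new_tokens, num_samples,
                                stop_sequence, temperature, top_p,
                                base_url=None, return_logits=False,
                                provider="openai"):
        """Assemble arguments for litellm.completion(), omitting sampling
        parameters that o1 models reject."""
        kwargs = {
            "model": model,  # assumed: sits above the lines shown in the diff
            "messages": prompt,
            "max_completion_tokens": max_new_tokens,
            "logprobs": return_logits if provider == "openai" else None,
            "base_url": base_url,
            "n": num_samples,
            "caching": True,
        }
        if "o1" in model:
            # o1 endpoints error out if these parameters are sent at all,
            # so the fix skips them instead of including them in the base dict.
            logger.warning("O1 models do not support temperature, top_p, stop sequence. Disabling.")
        else:
            kwargs["temperature"] = temperature
            kwargs["top_p"] = top_p
            kwargs["stop"] = stop_sequence
        return kwargs

    # Example call (requires valid API credentials); with an o1 model the
    # sampling parameters below are dropped before the request is sent.
    response = litellm.completion(**build_completion_kwargs(
        model="o1-mini",
        prompt=[{"role": "user", "content": "Say hello."}],
        max_new_tokens=64,
        num_samples=1,
        stop_sequence=None,
        temperature=0.3,
        top_p=0.9,
    ))

The first hunk is independent of this gating: it replaces litellm.verbose, which litellm does not read, with set_verbose, the flag litellm actually exposes for debug output, and turns it off.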
