diff --git a/sample.config.toml b/sample.config.toml
index 62cde107..794ffc02 100644
--- a/sample.config.toml
+++ b/sample.config.toml
@@ -16,11 +16,13 @@ GEMINI = ""
 MISTRAL = ""
 GROQ = ""
 NETLIFY = ""
+NEWAI = ""
 
 [API_ENDPOINTS]
 BING = "https://api.bing.microsoft.com/v7.0/search"
 GOOGLE = "https://www.googleapis.com/customsearch/v1"
 OLLAMA = "http://127.0.0.1:11434"
+NEWAI = "https://api.newai.com/v1"
 LM_STUDIO = "http://localhost:1234/v1"
 OPENAI = "https://api.openai.com/v1"
 
diff --git a/src/config.py b/src/config.py
index a3303118..080febd7 100644
--- a/src/config.py
+++ b/src/config.py
@@ -182,6 +182,21 @@ def set_timeout_inference(self, value):
     def save_config(self):
         with open("config.toml", "w") as f:
             toml.dump(self.config, f)
+
+    def get_newai_api_key(self):
+        # .get() with a default: existing user config.toml files predate the
+        # NEWAI section and must not raise KeyError on read.
+        return self.config["API_KEYS"].get("NEWAI", "")
+
+    def get_newai_api_endpoint(self):
+        return self.config["API_ENDPOINTS"].get("NEWAI", "")
+
+    def set_newai_api_key(self, key):
+        self.config["API_KEYS"]["NEWAI"] = key
+        self.save_config()
+
+    def set_newai_api_endpoint(self, endpoint):
+        self.config["API_ENDPOINTS"]["NEWAI"] = endpoint
+        self.save_config()
 
     def update_config(self, data):
         for key, value in data.items():
diff --git a/src/llm/newai_client.py b/src/llm/newai_client.py
new file mode 100644
index 00000000..057fd808
--- /dev/null
+++ b/src/llm/newai_client.py
@@ -0,0 +1,28 @@
+from openai import OpenAI
+
+from src.config import Config
+
+
+class NewAi:
+    """Client for the NewAI provider via its OpenAI-compatible endpoint."""
+
+    def __init__(self):
+        config = Config()
+        api_key = config.get_newai_api_key()
+        base_url = config.get_newai_api_endpoint()
+        self.client = OpenAI(api_key=api_key, base_url=base_url)
+
+    def inference(self, model_id: str, prompt: str) -> str:
+        """Send *prompt* as a single user message to *model_id* and return the reply text."""
+        chat_completion = self.client.chat.completions.create(
+            messages=[
+                {
+                    "role": "user",
+                    "content": prompt.strip(),
+                }
+            ],
+            model=model_id,
+            # temperature=0 for deterministic, reproducible completions
+            temperature=0,
+        )
+        return chat_completion.choices[0].message.content