From d0fab509b277920e88e528ebee0d74f25b7f1de1 Mon Sep 17 00:00:00 2001
From: "K.Y"
Date: Sat, 8 Jun 2024 18:15:42 +0800
Subject: [PATCH] feat: Change the env variable `OPENAI_API_KEY` to
 `OPENAI_API_KEY_CONFIG` (#136)

* feat: Change the env variable `OPENAI_API_KEY` to `OPENAI_API_KEY_CONFIG`

* chore: update .github conf
---
 .env                                     |  4 ++--
 .env.example                             |  4 ++--
 .github/ISSUE_TEMPLATE/bug-report-zh.yml |  2 +-
 .github/ISSUE_TEMPLATE/bug-report.yml    |  2 +-
 openai_forward/__init__.py               | 16 +---------------
 openai_forward/config/interface.py       | 14 +++++++-------
 openai_forward/console.py                |  2 +-
 openai_forward/settings.py               |  4 ++--
 openai_forward/webui/run.py              | 21 +++++++++++++------
 9 files changed, 32 insertions(+), 37 deletions(-)

diff --git a/.env b/.env
index 4627396..3b01fdf 100644
--- a/.env
+++ b/.env
@@ -23,8 +23,8 @@ BENCHMARK_MODE=true
 
 FORWARD_CONFIG=[{"base_url":"https://api.openai.com","route":"/","type":"openai"}]
 #LEVEL_MODELS={"1": ["gpt-4"], "2": ["gpt-3.5-turbo"]}
-#OPENAI_API_KEY={"sk-xxx": [0], "sk-xxx": [1], "sk-xxx": [1,2]}
-#FORWARD_KEY={"fk-0": 0, "fk-1": 1, "fk-2": 2, "default": 1}
+#OPENAI_API_KEY_CONFIG={"sk-xxx": [0], "sk-xxx": [1], "sk-xxx": [1,2]}
+#FORWARD_KEY_CONFIG={"fk-0": 0, "fk-1": 1, "fk-2": 2, "default": 1}
 
 # `REQ_RATE_LIMIT`: i.e., Request rate limit for specified routes, user specific
 # format: {route: ratelimit-string}
diff --git a/.env.example b/.env.example
index e5ac51a..ec027f3 100644
--- a/.env.example
+++ b/.env.example
@@ -18,8 +18,8 @@ BENCHMARK_MODE=true
 
 FORWARD_CONFIG=[{"base_url":"https://api.openai.com","route":"/","type":"openai"},{"base_url":"https://generativelanguage.googleapis.com","route":"/gemini","type":"general"}]
 #LEVEL_MODELS={"1": ["gpt-4"], "2": ["gpt-3.5-turbo"]}
-#OPENAI_API_KEY={"sk-xxx": [0], "sk-xxx": [1], "sk-xxx": [1,2]}
-#FORWARD_KEY={"fk-0": 0, "fk-1": 1, "fk-2": 2}
+#OPENAI_API_KEY_CONFIG={"sk-xxx": [0], "sk-xxx": [1], "sk-xxx": [1,2]}
+#FORWARD_KEY_CONFIG={"fk-0": 0, "fk-1": 1, "fk-2": 2}
 
 # `REQ_RATE_LIMIT`: 指定路由的请求速率限制(区分用户)
 # `REQ_RATE_LIMIT`: i.e., Request rate limit for specified routes, user specific
diff --git a/.github/ISSUE_TEMPLATE/bug-report-zh.yml b/.github/ISSUE_TEMPLATE/bug-report-zh.yml
index 4698648..96ab41d 100644
--- a/.github/ISSUE_TEMPLATE/bug-report-zh.yml
+++ b/.github/ISSUE_TEMPLATE/bug-report-zh.yml
@@ -14,7 +14,7 @@ body:
       description: |
         请确认以下所有项均被满足
       options:
-        - label: 我运行的`openai-forward`版本不低于v0.7.0
+        - label: 我运行的`openai-forward`版本不低于v0.8.0
           required: true
 
   - type: textarea
diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml
index 0b4c22b..6a3db79 100644
--- a/.github/ISSUE_TEMPLATE/bug-report.yml
+++ b/.github/ISSUE_TEMPLATE/bug-report.yml
@@ -15,7 +15,7 @@ body:
       description: |
         Just a few checks to make sure you need to create a bug report.
       options:
-        - label: My `openai-forward` version is not lower than v0.7.0
+        - label: My `openai-forward` version is not lower than v0.8.0
           required: true
 
   - type: textarea
diff --git a/openai_forward/__init__.py b/openai_forward/__init__.py
index a7ea4d0..793a432 100644
--- a/openai_forward/__init__.py
+++ b/openai_forward/__init__.py
@@ -1,20 +1,6 @@
-__version__ = "0.8.0"
+__version__ = "0.8.1"
 
 from dotenv import load_dotenv
 from yaml import load
-
-def yaml_load(filepath):
-
-    try:
-        from yaml import CLoader as Loader
-    except ImportError:
-        from yaml import Loader
-    with open(filepath, mode='r', encoding="utf-8") as stream:
-        # stream = stream.read()
-        content = load(stream, Loader=Loader)
-        return content
-
-
-# yaml_load()
 
 load_dotenv('.env', override=False)
diff --git a/openai_forward/config/interface.py b/openai_forward/config/interface.py
index 32a122d..bd13d23 100644
--- a/openai_forward/config/interface.py
+++ b/openai_forward/config/interface.py
@@ -84,22 +84,22 @@ class RateLimit(Base):
     token_rate_limit: List[RateLimitType] = [
         RateLimitType(
             route="/v1/chat/completions",
-            value=[{"level": '0', "rate_limit": "60/second"}],
+            value=[{"level": '0', "limit": "60/second"}],
         ),
         RateLimitType(
-            route="/v1/completions", value=[{"level": '0', "rate_limit": "60/second"}]
+            route="/v1/completions", value=[{"level": '0', "limit": "60/second"}]
         ),
     ]
     req_rate_limit: List[RateLimitType] = [
         RateLimitType(
             route="/v1/chat/completions",
-            value=[{"level": '0', "rate_limit": "100/2minutes"}],
+            value=[{"level": '0', "limit": "100/2minutes"}],
         ),
         RateLimitType(
-            route="/v1/completions", value=[{"level": '0', "rate_limit": "60/minute"}]
+            route="/v1/completions", value=[{"level": '0', "limit": "60/minute"}]
         ),
         RateLimitType(
-            route="/v1/embeddings", value=[{"level": '0', "rate_limit": "100/2minutes"}]
+            route="/v1/embeddings", value=[{"level": '0', "limit": "100/2minutes"}]
         ),
     ]
     iter_chunk: Literal['one-by-one', 'efficiency'] = 'one-by-one'
@@ -136,8 +136,8 @@ def convert_to_env(self, set_env=False):
             value: str
             values = value.strip().replace('，', ',').split(',')
             openai_key_dict[key] = [int(i) for i in values]
-        env_dict['OPENAI_API_KEY'] = json.dumps(openai_key_dict)
-        env_dict['FORWARD_KEY'] = json.dumps(self.forward_key)
+        env_dict['OPENAI_API_KEY_CONFIG'] = json.dumps(openai_key_dict)
+        env_dict['FORWARD_KEY_CONFIG'] = json.dumps(self.forward_key)
         env_dict['LEVEL_MODELS'] = json.dumps(self.level)
         if set_env:
             os.environ.update(env_dict)
diff --git a/openai_forward/console.py b/openai_forward/console.py
index e86dd2e..793a0c0 100644
--- a/openai_forward/console.py
+++ b/openai_forward/console.py
@@ -68,7 +68,7 @@ def print_rate_limit_info(
     """
     Print rate limit information.
     """
-    table = Table(title="", box=None, width=61)
+    table = Table(title="", box=None)
     table.add_column("")
     table.add_column("", justify='left')
     backend = backend or "memory"
diff --git a/openai_forward/settings.py b/openai_forward/settings.py
index ed6c5c0..648cc72 100644
--- a/openai_forward/settings.py
+++ b/openai_forward/settings.py
@@ -119,8 +119,8 @@
 IP_WHITELIST = env2list("IP_WHITELIST", sep=ENV_VAR_SEP)
 IP_BLACKLIST = env2list("IP_BLACKLIST", sep=ENV_VAR_SEP)
 
-OPENAI_API_KEY = env2dict("OPENAI_API_KEY")
-FWD_KEY = env2dict("FORWARD_KEY")
+OPENAI_API_KEY = env2dict("OPENAI_API_KEY_CONFIG")
+FWD_KEY = env2dict("FORWARD_KEY_CONFIG")
 LEVEL_MODELS = {int(key): value for key, value in env2dict("LEVEL_MODELS").items()}
 
 PROXY = os.environ.get("PROXY", "").strip() or None
diff --git a/openai_forward/webui/run.py b/openai_forward/webui/run.py
index 0dfd1e0..c06acb4 100644
--- a/openai_forward/webui/run.py
+++ b/openai_forward/webui/run.py
@@ -1,5 +1,6 @@
 import ast
 import pickle
+import secrets
 import threading
 
 import orjson
@@ -78,6 +79,8 @@ def worker(log_socket: zmq.Socket, q: SimpleQueue):
         ),
     )
 
+    st.write("---")
+
     if st.button(
         "Apply and Restart", help="Saving configuration and reloading openai forward"
     ):
@@ -94,6 +97,12 @@ def generate_env_content():
         env_content = "\n".join([f"{key}={value}" for key, value in env_dict.items()])
         return env_content
 
+    if st.button("Save to .env", help="Saving configuration to .env file"):
+        with st.spinner("Saving configuration to .env file."):
+            with open(".env", "w") as f:
+                f.write(generate_env_content())
+        st.success("Configuration saved to .env file")
+
     if st.button(
         "Export to .env file",
     ):
@@ -142,7 +151,7 @@ def display_forward_configuration():
         "> - type=general转发下的服务可以是任何服务(暂不支持websocket)"
     )
 
-    st.write("#")
+    # st.write("#")
 
     submitted = st.form_submit_button("Save", use_container_width=True)
 
     if submitted:
         forward_config.forward = [
             ForwardItem(base_url=row["base_url"], route=row["route"], type=row["type"])
             for _, row in edited_df.iterrows()
             if row["route"] is not None and row["base_url"] is not None
         ]
 
-        print(forward_config.convert_to_env())
+        print("save forward config success")
 
 
 def display_api_key_configuration():
@@ -284,7 +293,7 @@ def display_api_key_configuration():
 
     api_key.level = level_model_map
 
-    print(api_key.convert_to_env())
+    print("save api key success")
 
 
 def display_cache_configuration():
@@ -334,7 +343,7 @@ def display_cache_configuration():
            if row["cache_route"] is not None
        ]
 
-    print(cache.convert_to_env())
+    print("save cache success")
 
 
 def display_rate_limit_configuration():
@@ -398,7 +407,7 @@ def display_rate_limit_configuration():
            for _, row in edited_req_rate_limit_df.iterrows()
        ]
 
-    print(rate_limit.convert_to_env())
+    print("save rate limit success")
 
 
 def display_other_configuration():
@@ -420,7 +429,7 @@ def display_other_configuration():
     config.proxy = proxy
     config.benchmark_mode = benchmark_mode
 
-    print(config.convert_to_env())
+    print("save other config success")
 
 
 if selected_section == "Forward":