
Commit

Merge pull request #30 from LlmKira/chatgpt2
ChatGPT API magic-compatibility implementation
sudoskys authored Mar 2, 2023
2 parents f86a0ba + 1fc3a9a commit a7cb353
Showing 20 changed files with 525 additions and 141 deletions.
103 changes: 87 additions & 16 deletions lab/test.py
@@ -8,6 +8,7 @@
import time
from typing import List

from llm_kira.client.llms import ChatGptParam
from llm_kira.radio.anchor import SearchCraw
from loguru import logger
from llm_kira import radio
@@ -68,19 +69,6 @@ async def completion():
conversation_id=12094, # random.randint(1, 10000000),
)

-llm = llm_kira.client.llms.OpenAi(
-    profile=conversation,
-    api_key=openaiApiKey,
-    token_limit=4000,
-    auto_penalty=False,
-    call_func=None,
-)
-
-mem = receiver.MemoryManager(profile=conversation)
-chat_client = receiver.ChatBot(profile=conversation,
-                               llm_model=llm
-                               )


async def mood_hook():
_think = ThinkEngine(profile=conversation)
@@ -90,8 +78,84 @@ async def mood_hook():
print(_think.build_status(rank=5))


async def chatGpt():
llm = llm_kira.client.llms.ChatGpt(
profile=conversation,
api_key=openaiApiKey,
token_limit=4000,
auto_penalty=False,
call_func=None,
)

mem = llm_kira.client.MemoryManager(profile=conversation)
chat_client = llm_kira.client.ChatBot(
profile=conversation,
llm_model=llm
)
promptManager = llm_kira.creator.engine.PromptEngine(
reverse_prompt_buffer=False,
profile=conversation,
connect_words="\n",
memory_manger=mem,
llm_model=llm,
        description="晚上了,这里是河边",  # "It is night; this is the riverside"
reference_ratio=0.5,
forget_words=["忘掉对话"],
optimizer=Optimizer.SinglePoint,
)
    # Large-payload adversarial test
# promptManager.insert_prompt(prompt=PromptItem(start="Neko", text=random_string(8000)))
# promptManager.insert_prompt(prompt=PromptItem(start="Neko", text=random_string(500)))

    # Multi-prompt adversarial test
testPrompt = input("TestPrompt:")
promptManager.insert_prompt(prompt=PromptItem(start="Neko", text="喵喵喵"))
promptManager.insert_interaction(Interaction(single=True, ask=PromptItem(start="alice", text="MewMewMewMew")))
    _result = await promptManager.build_skeleton(
        query=testPrompt,
        llm_task="Summary Text" if len(testPrompt) > 20 else None,
        skeleton=random.choice(
            [SearchCraw(deacon=["https://www.bing.com/search?q={}&form=QBLH"])]
        ),
    )
_index = 1
for item in _result:
logger.trace(item.content)
item.ask.start = f"[{_index}]"
promptManager.insert_knowledge(knowledge=item)
_index += 1
promptManager.insert_knowledge(Interaction(single=True, ask=PromptItem(start="alice", text="MewMewMewMew")))
    # Test
promptManager.insert_prompt(prompt=PromptItem(start=conversation.start_name, text=testPrompt))
response = await chat_client.predict(
prompt=promptManager,
llm_param=ChatGptParam(model_name="gpt-3.5-turbo", temperature=0.8, presence_penalty=0.1, n=1, best_of=1),
predict_tokens=1000,
)
print(f"id {response.conversation_id}")
print(f"ask {response.ask}")
print(f"reply {response.reply}")
print(f"usage:{response.llm.usage}")
print(f"raw:{response.llm.raw}")
print(f"---{response.llm.time}---")
promptManager.clean(clean_prompt=True, clean_knowledge=False, clean_memory=False)
return "End"


async def chat():
-    promptManager = llm_kira.creator.PromptEngine(
+    llm = llm_kira.client.llms.OpenAi(
+        profile=conversation,
+        api_key=openaiApiKey,
+        token_limit=4000,
+        auto_penalty=False,
+        call_func=None,
+    )
+
+    mem = llm_kira.client.MemoryManager(profile=conversation)
+    chat_client = llm_kira.client.ChatBot(
+        profile=conversation,
+        llm_model=llm
+    )
+    promptManager = llm_kira.creator.engine.PromptEngine(
reverse_prompt_buffer=False,
profile=conversation,
connect_words="\n",
@@ -181,7 +245,13 @@ async def Sentiment():
async def Sim():
# response = llm_kira.utils.chat.Utils.edit_similarity(pre="4552", aft="1224")
# print(response)
response = llm_kira.utils.chat.Sim.cosion_similarity(pre="", aft="你是不是啊")
test1 = """
早苗(さなえ)
耕种水稻时刚刚种植的幼苗
原型是守矢早苗(もりやさなえ,生于1945年),守矢家第七十八代当主,是实际存在的人物。
守矢家是洩矢神的子孙,现任诹访神社下社神长官。洩矢神的祭祀司守矢家代代口传的祭神秘法。那个秘传是一脉相承的,在半夜没有火光的祈祷殿之中秘密传授。但是随着时代的变迁,世袭神官制度在明治五年被取消了。到明治六年,家传之宝(包括:印(印文「卖神祝印」)与镜、太刀等)从诹访大社上社被移走家里只残留下用佐奈伎铃(在大御立座祭神中所使用的)祭祀御左口神的方法。在明治时代,守矢实久(第七十六代当主)被取消了神长官一职,可惜当时口传秘法已失,实久只告诉了守矢真幸(第七十七代当主,实久之弟,诹访大社的祢宜宫司)剩下的部分。到守矢早苗(第七十八代当主,真幸之孙,平成18年(注)3月末从校长(注)[6]一职退下之后,一直致力于环境保护的演讲)这一代,已经不再继承代代相传的已消失的秘法了。到现在,再也没有人知道守矢祭神秘法了。
"""
response = llm_kira.utils.chat.Sim.cosion_similarity(pre=test1, aft="守矢家第七十八代当主")
print(response)


@@ -230,7 +300,8 @@ async def Web():
# asyncio.run(completion())
# asyncio.run(mood_hook())
# asyncio.run(Web())
-asyncio.run(chat())
+# asyncio.run(chat())
+asyncio.run(chatGpt())
# asyncio.run(Moderation())
# asyncio.run(Sentiment())
# asyncio.run(KeyParse())
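
The new chatGpt() test above exercises the whole ChatGPT path this PR adds. Below is a stripped-down sketch of that path with the web-search skeleton and adversarial tests removed; the Conversation arguments and the placeholder API key are assumptions (test.py's own values are elided from this diff), and the argument subset mirrors lab/test.py with defaults assumed for the rest.

import asyncio

import llm_kira
from llm_kira.client import Optimizer
from llm_kira.client.llms import ChatGptParam
from llm_kira.client.types import PromptItem
from llm_kira.creator.engine import PromptEngine


async def chatgpt_minimal():
    # Profile names and id are illustrative, not values from this diff.
    conversation = llm_kira.client.Conversation(
        start_name="Human:",
        restart_name="AI:",
        conversation_id=10093,
    )
    llm = llm_kira.client.llms.ChatGpt(
        profile=conversation,
        api_key="sk-...",  # placeholder, not a real key
        token_limit=4000,
        auto_penalty=False,
        call_func=None,
    )
    mem = llm_kira.client.MemoryManager(profile=conversation)
    chat_client = llm_kira.client.ChatBot(profile=conversation, llm_model=llm)
    promptManager = PromptEngine(
        profile=conversation,
        memory_manger=mem,  # parameter name spelled as in lab/test.py
        llm_model=llm,
        description="A quiet riverside at night",
        connect_words="\n",
        optimizer=Optimizer.SinglePoint,
    )
    # Queue one user turn, then let the ChatGPT backend answer it.
    promptManager.insert_prompt(prompt=PromptItem(start=conversation.start_name, text="Hello!"))
    response = await chat_client.predict(
        prompt=promptManager,
        llm_param=ChatGptParam(model_name="gpt-3.5-turbo", temperature=0.8),
        predict_tokens=500,
    )
    print(response.reply)


asyncio.run(chatgpt_minimal())
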
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "llm_kira"
version = "0.6.50"
version = "0.7.0"
description = "chatbot client for llm"
authors = ["sudoskys <[email protected]>"]
maintainers = [
5 changes: 4 additions & 1 deletion src/llm_kira/client/Optimizer.py
@@ -179,7 +179,10 @@ def run(self) -> List[Interaction]:
knowledge = Scorer.build_weight(self.knowledge)
_knowledge_token_limit = int(self.token_limit * self.reference_ratio)
_interaction_token_limit = self.token_limit - _knowledge_token_limit
_returner = [Interaction(single=True, ask=PromptItem(start="*", text=self.desc))]

# Desc
_returner = [Interaction(single=True, ask=PromptItem(start="system", text=self.desc))]

_old_prompt = interaction[:1]
# Desc
if self.tokenizer(self.desc) > self.token_limit:
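
Switching the description's speaker tag from "*" to "system" lines the optimizer's output up with ChatGPT's role-based message format. A hypothetical mapping (not code from this repo) shows the intent:

def to_chat_message(interaction) -> dict:
    # An Interaction tagged "system" can be forwarded as a ChatML system
    # message; any other speaker becomes ordinary user content.
    role = "system" if interaction.ask.start == "system" else "user"
    return {"role": role, "content": interaction.ask.text}
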
2 changes: 1 addition & 1 deletion src/llm_kira/client/__init__.py
@@ -6,5 +6,5 @@

from .llms import openai
from .enhance import Support
-from .agent import Conversation,MemoryManager
+from .agent import Conversation, MemoryManager
from .anchor import ChatBot
1 change: 1 addition & 0 deletions src/llm_kira/client/agent.py
@@ -76,3 +76,4 @@ def read_context(self) -> List[Interaction]:
def save_context(self, message: List[Interaction], override: bool = True):
self._DataManager.save(interaction_flow=message, override=override)
return message

52 changes: 8 additions & 44 deletions src/llm_kira/client/anchor.py
@@ -10,12 +10,10 @@
# from loguru import logger
from .llms.base import LlmBaseParam
from .llms.openai import LlmBase
-from .types import LlmReturn, Interaction, PromptItem
+from .types import LlmReturn, PromptItem
from ..creator.engine import PromptEngine
from ..error import LLMException

-# Utils
-from ..utils.chat import Sim
-from ..error import LLMException

# Completion
from .types import ChatBotReturn
@@ -37,40 +35,17 @@ def __init__(self,
if llm_model is None:
raise LLMException("Whats your llm model?")

-    def __person(self, prompt, prompt_list):
-        _person_list = [f"{self.profile.start_name}:",
-                        f"{self.profile.restart_name}:",
-                        f"{self.profile.start_name}:",
-                        f"{self.profile.restart_name}:",
-                        ]
-        for item in prompt_list:
-            if item.ask.connect_words.strip() in [":", ":"]:
-                _person_list.append(f"{item.ask.start}{item.ask.connect_words}")
-        _person_list = self.__rank_name(prompt=prompt.prompt, users=_person_list)
-        return _person_list
-
-    @staticmethod
-    def __rank_name(prompt: str, users: List[str]):
-        __temp = {}
-        for item in users:
-            __temp[item] = 0
-        users = list(__temp.keys())
-        _ranked = list(sorted(users, key=lambda i: Sim.cosion_similarity(pre=str(prompt), aft=str(i)), reverse=True))
-        return _ranked

async def predict(self,
prompt: PromptEngine,
predict_tokens: Union[int] = 100,
llm_param: LlmBaseParam = None,
parse_reply: Callable[[list], str] = None,
-                  rank_name: bool = True,
) -> ChatBotReturn:
"""
:param prompt: PromptEngine
:param predict_tokens: 预测 Token 位
:param llm_param: 大语言模型参数
:param parse_reply: Callable[[list], str] 覆写解析方法
:param rank_name: 自动排序停止词减少第三人称的冲突出现
"""
self.prompt = prompt
# ReWrite
@@ -79,28 +54,17 @@ async def predict(self,
if predict_tokens > self.llm.get_token_limit():
# Or Auto Cut?
raise LLMException("Why your predict token > set token limit?")
-        _llm_result_limit = self.llm.get_token_limit() - predict_tokens
-        _llm_result_limit = _llm_result_limit if _llm_result_limit > 0 else 1
-        # Get
-        _prompt_index, _prompt = self.prompt.build_prompt(predict_tokens=predict_tokens)
-        _prompt_list = []
-        _person_list = None if not rank_name else self.__person(prompt=_prompt_index, prompt_list=_prompt)
-
-        # Prompt construction
-        for item in _prompt:
-            _prompt_list.extend(item.content)
-
-        prompt_build = "\n".join(_prompt_list) + f"\n{self.profile.restart_name}:"
-        prompt_build = self.llm.resize_sentence(prompt_build, token=_llm_result_limit)
-        # TODO
-        # logger.trace(prompt_build)
# Get Question Index
_prompt_index = self.prompt.prompt
# Get
llm_result: LlmReturn = await self.llm.run(
-            prompt=prompt_build,
+            prompt=prompt,
            predict_tokens=predict_tokens,
-            llm_param=llm_param,
-            stop_words=_person_list
+            llm_param=llm_param
        )
+        prompt.clean(clean_prompt=True)

self.prompt.build_interaction(
ask=_prompt_index,
response=PromptItem(
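
predict() now hands the PromptEngine itself to llm.run() instead of flattening it into a string, so prompt assembly (and the old stop-word ranking) becomes each backend's job. A rough sketch of what a completion-style backend would do internally, reconstructed from the code removed above (hypothetical, not code from this repo):

def flatten_prompt(prompt_engine, predict_tokens: int = 500) -> str:
    # Mirrors the deleted logic in predict(): build the interaction list,
    # splice the content lines together, and leave role handling to the model.
    _index, interactions = prompt_engine.build_prompt(predict_tokens=predict_tokens)
    lines = []
    for item in interactions:
        lines.extend(item.content)
    return "\n".join(lines)
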
4 changes: 1 addition & 3 deletions src/llm_kira/client/llms/base.py
@@ -8,10 +8,8 @@
import os
from abc import abstractmethod, ABC
from typing import Union, Optional, Callable, Any, Dict, Tuple, Mapping, List

from loguru import logger
from pydantic import BaseModel

from ..types import LlmReturn


@@ -87,7 +85,7 @@ def parse_usage(response) -> Optional[int]:

@abstractmethod
async def run(self,
-                  prompt: str,
+                  prompt: Any,
validate: Union[List[str], None] = None,
predict_tokens: int = 500,
llm_param: LlmBaseParam = None,
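
Relaxing run()'s prompt from str to Any is the hook that lets the new ChatGpt backend receive the PromptEngine directly while string-based backends keep their old contract. A duck-typed stub illustrating both cases (hypothetical; it does not inherit the real LlmBase, whose other abstract members are out of scope here):

class EchoBackend:
    async def run(self, prompt, validate=None, predict_tokens=500, llm_param=None):
        # Legacy completion backends still receive a plain string...
        if isinstance(prompt, str):
            text = prompt
        else:
            # ...while the ChatGPT path receives a PromptEngine-like object
            # and builds its own view of the conversation.
            _index, interactions = prompt.build_prompt(predict_tokens=predict_tokens)
            text = "\n".join(line for item in interactions for line in item.content)
        return text  # a real backend would wrap this in an LlmReturn
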
(The remaining 13 changed files are not shown.)

