Update langchain & lagent webui prompt
mzr1996 committed Feb 2, 2024
1 parent 91cd0d3 commit 9091ad3
Showing 2 changed files with 8 additions and 6 deletions.
1 change: 1 addition & 0 deletions webui/modules/agents/lagent_agent.py
@@ -61,6 +61,7 @@ def create_internlm2_agent(llm, tools, cfg) -> internlm2_agent.Internlm2Agent:
         llm=llm,
         plugin_executor=ActionExecutor(actions=tools),
         protocol=internlm2_agent.Internlm2Protocol(
+            meta_prompt=cfg['meta_prompt'].strip(),
             plugin_prompt=cfg['plugin_prompt'].strip(),
             tool=dict(
                 begin='{start_token}{name}\n',
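
For context, a minimal sketch (illustrative, not part of the commit) of what this one-line addition enables: the webui's configured meta prompt is now forwarded to the lagent protocol instead of being dropped. The cfg values below are hypothetical placeholders:

    # Illustrative sketch: with this commit, the webui config's meta prompt
    # reaches Internlm2Protocol. The cfg values here are made up.
    from lagent.agents import internlm2_agent

    cfg = {
        'meta_prompt': 'You are a helpful assistant.',   # hypothetical value
        'plugin_prompt': 'You can call these tools:',    # hypothetical value
    }
    protocol = internlm2_agent.Internlm2Protocol(
        meta_prompt=cfg['meta_prompt'].strip(),      # new in this commit
        plugin_prompt=cfg['plugin_prompt'].strip(),
    )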
13 changes: 7 additions & 6 deletions webui/modules/agents/langchain_agent.py
@@ -32,9 +32,9 @@
     messages=[
         SystemMessagePromptTemplate(
             prompt=PromptTemplate(
-                input_variables=['tool_names', 'tools', 'meta_prompt'],
+                input_variables=['tool_names', 'tools'],
                 template=
-                '{meta_prompt}\nYou have access to the following tools:\n\n{tools}\n\nUse a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).\n\nValid "action" values: "Final Answer" or {tool_names}\n\nProvide only ONE action per $JSON_BLOB, as shown:\n\n```\n{{\n "action": $TOOL_NAME,\n "action_input": $INPUT\n}}\n```\n\nFollow this format:\n\nQuestion: input question to answer\nThought: consider previous and subsequent steps\nAction:\n```\n$JSON_BLOB\n```\nObservation: action result\n... (repeat Thought/Action/Observation N times)\nThought: I know what to respond\nAction:\n```\n{{\n "action": "Final Answer",\n "action_input": "Final response to human"\n}}```\n\nBegin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Please use the markdown style file path link to display images and audios in the final answer. The thought and final answer should use the same language with the question. Format is Action:```$JSON_BLOB```then Observation'
+                'Respond to the human as helpfully and accurately as possible. You have access to the following tools:\n\n{tools}\n\nUse a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).\n\nValid "action" values: "Final Answer" or {tool_names}\n\nProvide only ONE action per $JSON_BLOB, as shown:\n\n```\n{{\n "action": $TOOL_NAME,\n "action_input": $INPUT\n}}\n```\n\nFollow this format:\n\nQuestion: input question to answer\nThought: consider previous and subsequent steps\nAction:\n```\n$JSON_BLOB\n```\nObservation: action result\n... (repeat Thought/Action/Observation N times)\nThought: I know what to respond\nAction:\n```\n{{\n "action": "Final Answer",\n "action_input": "Final response to human"\n}}```\n\nBegin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Please use the markdown style file path link to display images and audios in the final answer. The thought and final answer should use the same language with the question. Format is Action:```$JSON_BLOB```then Observation'
             )),
         MessagesPlaceholder(variable_name='chat_history', optional=True),
         HumanMessagePromptTemplate(
@@ -188,7 +188,6 @@ def create_langchain_structure(llm, tools):
 def generate_structured(question, state, history) -> Iterator[List[BaseModel]]:
     from .. import shared
     cfg = shared.agents_settings[shared.agent_name]
-    meta_prompt = cfg.get('meta_prompt') or ''
     messages = []

     mq = Queue()
@@ -199,11 +198,13 @@ def generate_structured(question, state, history) -> Iterator[List[BaseModel]]:
     callback = GenerationCallback(mq, tools)
     agent = create_langchain_structure(shared.llm, tools)

-    history = langchain_style_history(history)
+    history = langchain_style_history(history).messages
+    if cfg.get('meta_prompt'):
+        history = [lc_msg.HumanMessage(content=cfg['meta_prompt'])] + history
+
     thread = Thread(
         target=agent.invoke,
-        args=(dict(input=question, chat_history=history.messages, meta_prompt=meta_prompt),
-              dict(callbacks=[callback], )))
+        args=(dict(input=question, chat_history=history), dict(callbacks=[callback], )))
     thread.start()
     while thread.is_alive() or mq.qsize() > 0:
         if mq.qsize() > 0:
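
On the LangChain side, the commit moves the meta prompt out of the system template (the {meta_prompt} input variable is removed) and prepends it to the chat history as a human message, so it reaches the model through the MessagesPlaceholder('chat_history') slot. A standalone sketch of the new behavior, assuming the classic langchain.schema message classes (the import path may differ across LangChain versions):

    # Illustrative sketch: the meta prompt now rides along in chat_history
    # rather than being substituted into the system prompt template.
    from langchain.schema import AIMessage, HumanMessage

    cfg = {'meta_prompt': 'Answer in the same language as the question.'}  # hypothetical
    history = [HumanMessage(content='hi'), AIMessage(content='Hello!')]

    if cfg.get('meta_prompt'):
        history = [HumanMessage(content=cfg['meta_prompt'])] + history

    # history[0] is now the meta prompt; agent.invoke(dict(input=question,
    # chat_history=history)) delivers it along with the rest of the chat.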
