Multi-Shot Smart Instruct (#309)
* SMART Agent-LLM?

* Give commands every step of the way.

* Inject memory every step of the way so it LEARNS

* No commands in validation

* Built custom prompts and smart instruct function

* Add smart instruct endpoint

* Add Smart Instruct endpoint
Josh-XT authored May 11, 2023
1 parent ac74c6f commit 24c6cc2
Showing 6 changed files with 108 additions and 1 deletion.
14 changes: 14 additions & 0 deletions API-Tests.ipynb
@@ -210,6 +210,20 @@
"print(data[\"response\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Test POST /api/agent/{agent_name}/smartinstruct/{shots}\n",
"# SMART Instruct the agent\n",
"data = {\"prompt\": \"Write a tweet about AI.\"}\n",
"response = requests.post(f\"{base_uri}/api/agent/huggingchat/smartinstruct/3\", json=data)\n",
"data = response.json()\n",
"print(data[\"response\"])"
]
},
{
"cell_type": "code",
"execution_count": 29,
20 changes: 19 additions & 1 deletion AgentLLM.py
@@ -99,13 +99,15 @@ def format_prompt(
long_term_access=long_term_access,
max_tokens=max_context_tokens,
)
command_list = self.get_commands_string()
formatted_prompt = self.custom_format(
prompt,
task=task,
agent_name=self.agent_name,
COMMANDS=self.get_commands_string(),
COMMANDS=command_list,
context=context,
objective=self.primary_objective,
command_list=command_list,
**kwargs,
)
tokens = len(self.memories.nlp(formatted_prompt))
@@ -234,6 +236,22 @@ def run(
self.CFG.log_interaction(self.agent_name, self.response)
return self.response

def smart_instruct(
self,
task: str = "Write a tweet about AI.",
shots: int = 3,
):
answers = []
# Do multi shots of prompt to get N different answers to be validated
for i in range(shots):
answers.append(self.run(task=task, prompt="StepByStep"))
answer_str = ""
for i, answer in enumerate(answers):
answer_str += f"Answer {i + 1}:\n{answer}\n\n"
researcher = self.run(task=answer_str, prompt="Researcher")
resolver = self.run(task=researcher, prompt="Resolver")
return resolver

def get_status(self):
try:
return not self.stop_running_event.is_set()
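The new smart_instruct flow can also be driven directly from Python rather than through the HTTP endpoint. A minimal usage sketch, assuming an agent named "huggingchat" is already configured (the agent name and task text are illustrative, borrowed from the test notebook above):

from AgentLLM import AgentLLM

# Three StepByStep drafts, one Researcher critique, one Resolver synthesis
agent = AgentLLM("huggingchat")
final_answer = agent.smart_instruct(task="Write a tweet about AI.", shots=3)
print(final_answer)
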
7 changes: 7 additions & 0 deletions app.py
@@ -187,6 +187,13 @@ async def instruct(agent_name: str, prompt: Prompt):
return {"response": str(response)}


@app.post("/api/agent/{agent_name}/smartinstruct/{shots}", tags=["Agent"])
async def smartinstruct(agent_name: str, shots: int, prompt: Prompt):
agent = AgentLLM(agent_name)
response = agent.smart_instruct(task=prompt.prompt, shots=int(shots))
return {"response": str(response)}


@app.post("/api/agent/{agent_name}/chat", tags=["Agent"])
async def chat(agent_name: str, prompt: Prompt):
agent = AgentLLM(agent_name)
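The Prompt request model used by these handlers is defined elsewhere in app.py and is not part of this diff. A minimal sketch of what the handlers assume, i.e. a Pydantic body with a single prompt field (the field name follows from prompt.prompt above; the rest is an assumption):

from pydantic import BaseModel

class Prompt(BaseModel):
    # Instruction text; forwarded to AgentLLM.smart_instruct as the task argument
    prompt: str
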
29 changes: 29 additions & 0 deletions prompts/Researcher.txt
@@ -0,0 +1,29 @@
Commands Available To Complete Task:
{COMMANDS}

Context: {context}

You are a researcher tasked with investigating the {shots} response options provided.
1. List the flaws and faulty logic of each answer option.
2. Choose the appropriate commands to complete the research task.
3. Return the improved answer in full in the following JSON format:

{
"response": "The full improved answer.",
"commands": [
"command_name": {
"arg1": "val1",
"arg2": "val2"
},
"command_name2": {
"arg1": "val1",
"arg2": "val2",
"argN": "valN"
}
]
}

Let's work this out in a step by step way to be sure we have all the errors.

Responses:
{task}
30 changes: 30 additions & 0 deletions prompts/Resolver.txt
@@ -0,0 +1,30 @@
Commands Available To Complete Task:
{COMMANDS}

Context:
{context}

You are a resolver tasked with:
1. Finding which of the {shots} answer options the researcher thought was best.
2. Improving that answer.
3. Choosing the appropriate commands to complete the task.
4. Returning the improved answer in full in the following JSON format:

{
"response": "The full improved answer.",
"commands": [
"command_name": {
"arg1": "val1",
"arg2": "val2"
},
"command_name2": {
"arg1": "val1",
"arg2": "val2",
"argN": "valN"
}
]
}

Let's work this out in a step by step way to be sure we have the right answer:

{task}
9 changes: 9 additions & 0 deletions prompts/StepByStep.txt
@@ -0,0 +1,9 @@
Commands Available To Complete Task:
{command_list}

Context:
{context}

Task: {task}

Answer: Let's work this out in a step by step way to be sure we are doing what is asked.
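The placeholders in StepByStep.txt correspond to the keyword arguments that format_prompt now passes to custom_format (command_list, context, task). A rough illustration of the substitution, assuming plain str.format-style replacement; the command list and context values below are made up, and the real ones come from the agent's command registry and injected memories:

# Illustrative only: which kwargs fill which placeholders in StepByStep.txt
template = open("prompts/StepByStep.txt").read()
filled = template.format(
    command_list="google_search, write_file, ...",          # hypothetical commands string
    context="(relevant memories retrieved for this task)",  # hypothetical context
    task="Write a tweet about AI.",
)
print(filled)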
