Skip to content

Commit

Permalink
Instruct/Execute format for responses to JSON, fix docker-compose, remove old refs. (#244)
Browse files Browse the repository at this point in the history

* Add Wacker's changes minus prints

* Prompt updates

* Prompt updates

* Force JSON validation

* Error handling

* Move log

* Context to None

* Reset i

* Only enforce JSON response on Instruct/Exec

* Move validation

* Add no commands return

* Context decay

* Change to 3 top results

* Context results count

* Remove commands_enabled flag

* Fix incorrect ref

* Set self.response to valid json

* Reorganizing

* Updates

* Set instruct prompt in chain step

* Update compose to pull image

* Update directions

* Clean up instructions

* Set network mode to host
  • Loading branch information
Josh-XT authored May 8, 2023
1 parent e132072 commit 02e8d8b
Show file tree
Hide file tree
Showing 24 changed files with 434 additions and 302 deletions.
154 changes: 91 additions & 63 deletions AgentLLM.py
Original file line number Diff line number Diff line change
def get_commands_string(self):
    """Return a newline-separated, human-readable listing of enabled commands.

    Each line has the form ``<friendly_name> - <name>(<args>)``; commands whose
    ``enabled`` flag is False are omitted. Returns the sentinel string
    ``"No commands."`` when nothing is enabled.
    """
    # BUG FIX: the original called len() on a filter object, which raises
    # TypeError. Materialize the enabled commands as a list so both the
    # emptiness check and the join work.
    enabled_commands = [
        command
        for command in self.available_commands
        if command.get("enabled", True)
    ]
    if not enabled_commands:
        return "No commands."
    return "\n".join(
        f"{command['friendly_name']} - {command['name']}({command['args']})"
        for command in enabled_commands
    )

def run(
def validate_json(self, json_string: str):
    """Extract and parse a JSON payload from a model response.

    If the response wraps its JSON in a fenced code block (```json, ```css,
    ```vbnet, or ```javascript — models mislabel fences), the fenced content
    is extracted first; otherwise the whole string is parsed as-is.

    Returns the parsed object on success, or ``False`` when the string is
    not valid JSON.

    NOTE(review): a response whose JSON parses to a falsy value (``{}``,
    ``false``, ``0``, ``null``) is indistinguishable from a parse failure
    for callers doing ``if not valid_json`` — confirm callers never expect
    falsy-but-valid payloads.
    """
    try:
        fenced = re.findall(
            r"```(?:json|css|vbnet|javascript)\n([\s\S]*?)\n```", json_string
        )
        # Fall back to the raw string when no fenced block is present.
        candidate = fenced[0] if fenced else json_string
        return json.loads(candidate)
    except JSONDecodeError:
        # Unused exception binding removed; failure is signalled via False.
        return False

def format_prompt(
self,
task: str,
max_context_tokens: int = 500,
top_results: int = 3,
long_term_access: bool = False,
commands_enabled: bool = True,
prompt: str = "",
max_context_tokens: int = 500,
prompt="",
**kwargs,
):
cp = CustomPrompt()
Expand All @@ -95,13 +108,15 @@ def run(
prompt = cp.get_model_prompt(prompt_name=prompt, model=self.CFG.AI_MODEL)
else:
prompt = CustomPrompt().get_prompt(prompt)

context = self.context_agent(
query=task,
top_results_num=3,
long_term_access=long_term_access,
max_tokens=max_context_tokens,
)
if top_results == 0:
context = "None"
else:
context = self.context_agent(
query=task,
top_results_num=top_results,
long_term_access=long_term_access,
max_tokens=max_context_tokens,
)
formatted_prompt = prompt.format(
task=task,
agent_name=self.agent_name,
Expand All @@ -110,59 +125,74 @@ def run(
objective=self.primary_objective,
**kwargs,
)
self.CFG.log_interaction("USER", task)
return formatted_prompt, prompt

def run(
self,
task: str,
max_context_tokens: int = 500,
long_term_access: bool = False,
prompt: str = "",
context_results: int = 3,
**kwargs,
):
formatted_prompt, unformatted_prompt = self.format_prompt(
task=task,
top_results=context_results,
long_term_access=long_term_access,
max_context_tokens=max_context_tokens,
prompt=prompt,
**kwargs,
)
self.response = self.CFG.instruct(formatted_prompt)
# Handle commands if in response
if "{COMMANDS}" in unformatted_prompt:
valid_json = self.validate_json(self.response)
while not valid_json:
print("Invalid JSON response. Trying again.")
# Begin context decay
if context_results != 0:
context_results = context_results - 1
else:
context_results = 0
formatted_prompt, unformatted_prompt = self.format_prompt(
task=task,
top_results=context_results,
long_term_access=long_term_access,
max_context_tokens=max_context_tokens,
prompt=prompt,
**kwargs,
)
self.response = self.CFG.instruct(formatted_prompt)
valid_json = self.validate_json(self.response)
if valid_json:
self.response = valid_json
response_parts = []
for command_name, command_args in self.response["commands"].items():
# Search for the command in the available_commands list, and if found, use the command's name attribute for execution
if command_name is not None:
for available_command in self.available_commands:
if command_name in [
available_command["friendly_name"],
available_command["name"],
]:
command_name = available_command["name"]
break
response_parts.append(
f"\n\n{self.commands.execute_command(command_name, command_args)}"
)
else:
if command_name == "None.":
response_parts.append(f"\n\nNo commands were executed.")
else:
response_parts.append(
f"\n\nCommand not recognized: {command_name}"
)
self.response = self.response.replace(prompt, "".join(response_parts))
if not self.CFG.NO_MEMORY:
self.store_result(task, self.response)
self.CFG.log_interaction("USER", task)
self.CFG.log_interaction(self.agent_name, self.response)
# Check if any commands are in the response and execute them with their arguments if so
if commands_enabled:
# Parse out everything after Commands: in self.response, each new line is a command
commands = re.findall(
r"(?i)Commands:[\n]*(.*)", f"{self.response}", re.DOTALL
)
if len(commands) > 0:
response_parts = []
for command in commands[0].split("\n"):
command = command.strip()
# Check if the command starts with a number and strip out everything until the first letter
if command and command[0].isdigit():
first_letter = re.search(r"[a-zA-Z]", command)
if first_letter:
command = command[first_letter.start() :]
command_name, command_args = None, {}
# Extract command name and arguments using regex
command_regex = re.search(r"(\w+)\((.*)\)", command)
if command_regex:
command_name, args_str = command_regex.groups()
if args_str:
# Parse arguments string into a dictionary
args_str = args_str.replace("'", '"')
args_str = args_str.replace("None", "null")
try:
command_args = json.loads(args_str)
except JSONDecodeError as e:
# error parsing args, send command_name to None so trying to execute command won't crash
command_name = None
print(f"Error: {e}")

# Search for the command in the available_commands list, and if found, use the command's name attribute for execution
if command_name is not None:
for available_command in self.available_commands:
if available_command["friendly_name"] == command_name:
command_name = available_command["name"]
break
response_parts.append(
f"\n\n{self.commands.execute_command(command_name, command_args)}"
)
else:
if command == "None.":
response_parts.append(f"\n\nNo commands were executed.")
else:
response_parts.append(f"\n\n{command}")
self.response = self.response.replace(
commands[0], "".join(response_parts)
)
print(f"Response: {self.response}")
return self.response

Expand Down Expand Up @@ -253,7 +283,6 @@ def task_creation_agent(
) -> List[Dict]:
response = self.run(
task=self.primary_objective,
commands_enabled=False,
prompt="task",
result=result,
task_description=task_description,
Expand All @@ -276,7 +305,6 @@ def prioritization_agent(self):

response = self.run(
task=self.primary_objective,
commands_enabled=False,
prompt="priority",
tasks=", ".join(task_names),
next_task_id=next_task_id,
Expand Down Expand Up @@ -330,7 +358,7 @@ def run_chain_step(self, step_data_list):
for step_data in step_data_list:
for prompt_type, prompt in step_data.items():
if prompt_type == "instruction":
self.run(prompt)
self.run(prompt, prompt="instruct")
elif prompt_type == "task":
self.run_task(prompt)
elif prompt_type == "command":
Expand Down
2 changes: 1 addition & 1 deletion Commands.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ def execute_command(self, command_name: str, command_args: dict = None):
params[name] = value

try:
output = command_function(module, **params)
output = command_function(**params)
except Exception as e:
output = f"Error: {str(e)}"

Expand Down
6 changes: 1 addition & 5 deletions app.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,10 +95,6 @@ class ToggleCommandPayload(BaseModel):
enable: bool


class Prompt(BaseModel):
prompt: str


class CustomPromptModel(BaseModel):
prompt_name: str
prompt: str
Expand Down Expand Up @@ -210,7 +206,7 @@ async def instruct(agent_name: str, prompt: Prompt):
@app.post("/api/agent/{agent_name}/chat", tags=["Agent"])
async def chat(agent_name: str, prompt: Prompt):
agent = AgentLLM(agent_name)
response = agent.run(prompt.prompt, max_context_tokens=500, commands_enabled=False)
response = agent.run(prompt.prompt, max_context_tokens=500)
return {"response": str(response)}


Expand Down
1 change: 0 additions & 1 deletion commands/chain_commands.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@ def __init__(self):
if self.chains != None:
for chain in self.chains:
if "name" in chain:
name = f"Run Chain: {chain['name']}"
self.commands.update(
{f"Run Chain: {chain['name']}": self.run_chain}
)
Expand Down
4 changes: 2 additions & 2 deletions commands/work_with_ai.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,9 @@ def __init__(self):
)

def ask(self, prompt: str) -> str:
response = AgentLLM().run(prompt, commands_enabled=False)
response = AgentLLM().run(prompt)
return response

def instruct(self, prompt: str) -> str:
response = AgentLLM().run(prompt, commands_enabled=True)
response = AgentLLM().run(task=prompt, prompt="instruct")
return response
6 changes: 4 additions & 2 deletions docker-compose-mac.yml
Original file line number Diff line number Diff line change
@@ -1,12 +1,13 @@
version: "3.8"
services:
frontend:
build: ./frontend
image: ghcr.io/jamesonrgrieve/agent-llm-frontend:latest
init: true
network_mode: host
env_file:
- .env
environment:
NEXT_PUBLIC_API_URI: ${NEXT_PUBLIC_API_URI:-http://backend:7437}
NEXT_PUBLIC_API_URI: ${NEXT_PUBLIC_API_URI:-http://localhost:7437}
ports:
- "3000:3000"
depends_on:
Expand All @@ -17,6 +18,7 @@ services:
context: .
dockerfile: Dockerfile-mac-backend
init: true
network_mode: host
env_file:
- .env
ports:
Expand Down
6 changes: 4 additions & 2 deletions docker-compose.yml
Original file line number Diff line number Diff line change
@@ -1,12 +1,13 @@
version: "3.8"
services:
frontend:
build: ./frontend
image: ghcr.io/jamesonrgrieve/agent-llm-frontend:latest
init: true
network_mode: host
env_file:
- .env
environment:
NEXT_PUBLIC_API_URI: ${NEXT_PUBLIC_API_URI:-http://backend:7437}
NEXT_PUBLIC_API_URI: ${NEXT_PUBLIC_API_URI:-http://localhost:7437}
ports:
- "3000:3000"
depends_on:
Expand All @@ -17,6 +18,7 @@ services:
context: .
dockerfile: Dockerfile
init: true
network_mode: host
env_file:
- .env
ports:
Expand Down
42 changes: 25 additions & 17 deletions docs/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -45,10 +45,13 @@ This project is under active development and may still have issues. We appreciat
- [Key Features 🗝️](#key-features-️)
- [Web Application Features](#web-application-features)
- [Quick Start with Docker](#quick-start-with-docker)
- [Running a Mac?](#running-a-mac)
- [Linux or Windows](#linux-or-windows)
- [MacOS](#macos)
- [Alternative: Quick Start for Local or Virtual Machine](#alternative-quick-start-for-local-or-virtual-machine)
- [Linux or MacOS:](#linux-or-macos)
- [Windows:](#windows)
- [Back End](#back-end)
- [Front End](#front-end)
- [Linux or MacOS](#linux-or-macos)
- [Windows](#windows)
- [Configuration](#configuration)
- [API Endpoints](#api-endpoints)
- [Extending Functionality](#extending-functionality)
Expand Down Expand Up @@ -101,49 +104,54 @@ The frontend web application of Agent-LLM provides an intuitive and interactive

## Quick Start with Docker

1. Clone the repositories for the Agent-LLM front/back ends then start the services with Docker.
Clone the repositories for the Agent-LLM front/back ends then start the services with Docker.

### Linux or Windows

```
git clone https://github.com/Josh-XT/Agent-LLM
cd Agent-LLM
git clone https://github.com/JamesonRGrieve/Agent-LLM-Frontend frontend --recurse-submodules
docker-compose up -d
```

2. Access the web interface at http://localhost:3000

### Running a Mac?

If you're getting errors, you may need to use the command below to start the containers set up for Mac.
### MacOS

```
git clone https://github.com/Josh-XT/Agent-LLM
cd Agent-LLM
docker compose -f docker-compose-mac.yml up -d
```

Access the web interface at http://localhost:3000

## Alternative: Quick Start for Local or Virtual Machine

As a reminder, this can be dangerous to run locally depending on what commands you give your agents access to. [⚠️ Run this in Docker or a Virtual Machine!](#️-run-this-in-docker-or-a-virtual-machine)

1. Open two separate terminals, the front end and back end will need to run separately.
2. In the first terminal, clone the repositories for the Agent-LLM back end and start it.
### Back End

Clone the repositories for the Agent-LLM back end and start it.

```
git clone https://github.com/Josh-XT/Agent-LLM
cd Agent-LLM
pip install -r requirements.txt
python app.py
```
3. In the second terminal, run the front end.

### Linux or MacOS:
### Front End
#### Linux or MacOS
```
docker run -it --pull always -p 80:3000 -e NEXT_PUBLIC_API_URI=http://$(ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1')':7437 ghcr.io/jamesonrgrieve/agent-llm-frontend:main
docker run -it --pull always -p 3000:3000 -e NEXT_PUBLIC_API_URI=http://$(ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1')':7437 ghcr.io/jamesonrgrieve/agent-llm-frontend:main
```

### Windows:
#### Windows
```
docker run -it --pull always -p 80:3000 -e NEXT_PUBLIC_API_URI=http://$(Get-NetIPAddress -AddressFamily IPv4 | Where-Object { $_.InterfaceAlias -ne "Loopback Pseudo-Interface 1" -and $_.AddressFamily -eq "IPv4" } | Select-Object -ExpandProperty IPAddress)`:7437 ghcr.io/jamesonrgrieve/agent-llm-frontend:main
docker run -it --pull always -p 3000:3000 -e NEXT_PUBLIC_API_URI=http://$(Get-NetIPAddress -AddressFamily IPv4 | Where-Object { $_.InterfaceAlias -ne "Loopback Pseudo-Interface 1" -and $_.AddressFamily -eq "IPv4" } | Select-Object -ExpandProperty IPAddress)`:7437 ghcr.io/jamesonrgrieve/agent-llm-frontend:main
```

Access the web interface at http://localhost:3000

## Configuration

Agent-LLM utilizes a `.env` configuration file to store AI language model settings, API keys, and other options. Use the supplied `.env.example` as a template to create your personalized `.env` file. Configuration settings include:
Expand Down
Loading

0 comments on commit 02e8d8b

Please sign in to comment.