
Commit: code change
BRama10 committed Nov 24, 2024
1 parent d056d33 commit d1104b3
Showing 5 changed files with 52 additions and 59 deletions.
aios/hooks/modules/agent.py (16 changes: 12 additions & 4 deletions)
@@ -1,6 +1,7 @@
 from concurrent.futures import ThreadPoolExecutor, Future
 from random import randint
 from typing import Any, Tuple, Callable, Dict
+from aios.hooks.syscall import useSysCall
 from aios.hooks.types.agent import AgentSubmitDeclaration, FactoryParams
 from aios.hooks.utils.validate import validate
 from aios.hooks.stores import queue as QueueStore, processes as ProcessStore
@@ -17,6 +18,8 @@ def useFactory(
     thread_pool = ThreadPoolExecutor(max_workers=params.max_workers)
     manager = AgentManager('https://my.aios.foundation')
 
+    send_request, _ = useSysCall()
+
     @validate(AgentSubmitDeclaration)
     def submitAgent(declaration_params: AgentSubmitDeclaration) -> int:
         """
@@ -28,8 +31,6 @@ def submitAgent(declaration_params: AgentSubmitDeclaration) -> int:
         Returns:
             int: A unique process ID for the submitted agent.
         """
-        print('hi')
-
         def run_agent(agent_name: str, task):
             is_local = False
 
@@ -41,15 +42,21 @@ def run_agent(agent_name: str, task):
                     author=agent_name.split('/')[0],
                     name=agent_name.split('/')[1]
                 )
 
             except:
                 is_local = True
 
             if is_local:
                 agent_class, _ = manager.load_agent(local=True, path=agent_name)
             else:
                 agent_class, _ = manager.load_agent(author, name, version)
 
-            agent = agent_class(agent_name, task)
+            agent = agent_class(agent_name, task, _)
 
+            agent.send_request = send_request
+
+            print(agent, 'hm,')
+            k = agent.run()
+            print(k)
+
             return agent.run()
@@ -89,6 +96,7 @@ def awaitAgentExecution(process_id: str) -> Dict[str, Any]:
         print(future)
 
         if future:
+            print('heredd')
             return future.result()
         else:
             raise ValueError(f"Process with ID '{process_id}' not found.")
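Note: the substantive change in this file is dependency injection of the syscall channel. useFactory now calls useSysCall() once and assigns send_request onto each agent instance it builds, so agents no longer open their own channel. A minimal sketch of an agent that fits this pattern; the class name, constructor shape, and LLMQuery fields are illustrative assumptions, not part of this commit:

from cerebrum.llm.communication import LLMQuery

class EchoAgent:
    # Hypothetical agent: useFactory constructs it with
    # (agent_name, task, config) and then assigns `send_request`
    # on the instance before calling run().
    def __init__(self, agent_name, task, config=None):
        self.agent_name = agent_name
        self.task = task
        self.send_request = None  # injected by useFactory

    def run(self):
        # Route the LLM call through the kernel's syscall channel
        # instead of talking to a model backend directly.
        query = LLMQuery(messages=[{"role": "user", "content": self.task}])
        return self.send_request(self.agent_name, query)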
aios/hooks/modules/tool.py (1 change: 1 addition & 0 deletions)
@@ -49,6 +49,7 @@ def getMessage():
 
     # Function to add messages to the queue
     def addMessage(message: str):
+        print(message)
         return QueueStore.addMessage(_, message)
 
     # Function to check if the queue is empty
aios/tool/manager.py (1 change: 1 addition & 0 deletions)
@@ -12,6 +12,7 @@ def __init__(
 
    def address_request(self, syscall) -> None:
        tool_calls = syscall.tool_calls
+       print(syscall.tool_calls, 'ttt')
        for tool_call in tool_calls:
            tool_org_and_name, tool_params = (
                tool_call["name"],
server.py → runtime/kernel.py (87 changes: 38 additions & 49 deletions)
@@ -2,8 +2,7 @@
 from fastapi import FastAPI, HTTPException
 from pydantic import BaseModel
 from typing import Optional, Dict, Any
-import uvicorn
-from contextlib import asynccontextmanager
+from dotenv import load_dotenv
 
 from aios.hooks.modules.llm import useCore
 from aios.hooks.modules.memory import useMemoryManager
@@ -15,6 +14,13 @@
 
 from cerebrum.llm.communication import LLMQuery
 
+# from cerebrum.llm.layer import LLMLayer as LLMConfig
+# from cerebrum.memory.layer import MemoryLayer as MemoryConfig
+# from cerebrum.storage.layer import StorageLayer as StorageConfig
+# from cerebrum.tool.layer import ToolLayer as ToolManagerConfig
+
+load_dotenv()
+
 app = FastAPI()
 
 # Store component configurations and instances
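Note: load_dotenv() now runs at import time, so provider credentials and similar settings come from a local .env file instead of being exported before launching uvicorn directly. A hypothetical .env for local development; the variable names are illustrative and depend on which backends you use:

OPENAI_API_KEY=sk-...
HUGGING_FACE_HUB_TOKEN=hf_...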
@@ -28,68 +34,67 @@
 
 send_request, SysCallWrapper = useSysCall()
 
 
 class LLMConfig(BaseModel):
     llm_name: str
-    max_gpu_memory: dict | None = None
+    max_gpu_memory: dict | None = None
     eval_device: str = "cuda:0"
     max_new_tokens: int = 2048
     log_mode: str = "INFO"
     use_backend: str = "default"
 
 
 class StorageConfig(BaseModel):
     root_dir: str = "root"
     use_vector_db: bool = False
     vector_db_config: Optional[Dict[str, Any]] = None
 
 
 class MemoryConfig(BaseModel):
     memory_limit: int = 104857600  # 100MB in bytes
     eviction_k: int = 10
     custom_eviction_policy: Optional[str] = None
 
 
 class ToolManagerConfig(BaseModel):
     allowed_tools: Optional[list[str]] = None
     custom_tools: Optional[Dict[str, Any]] = None
 
-class SchedulerConfig(BaseModel):
-    log_mode: str = "INFO"
-    max_workers: int = 64
-    custom_syscalls: Optional[Dict[str, Any]] = None
 
+class SchedulerConfig(BaseModel):
+    log_mode: str = "INFO"
+    max_workers: int = 64
+    custom_syscalls: Optional[Dict[str, Any]] = None
+
 
+class AgentSubmit(BaseModel):
+    agent_id: str
+    agent_config: Dict[str, Any]
+
+class QueryRequest(BaseModel):
+    agent_name: str
+    query_type: Literal["llm", "tool", "storage", "memory"]
+    query_data: LLMQuery
 
 @app.post("/core/llm/setup")
 async def setup_llm(config: LLMConfig):
     """Set up the LLM core component."""
-    # try:
-    #     llm = useCore(
-    #         llm_name=config.llm_name,
-    #         max_gpu_memory=config.max_gpu_memory,
-    #         eval_device=config.eval_device,
-    #         max_new_tokens=config.max_new_tokens,
-    #         log_mode=config.log_mode,
-    #         use_backend=config.use_backend
-    #     )
-    #     active_components["llm"] = llm
-    #     return {"status": "success", "message": "LLM core initialized"}
-    # except Exception as e:
-    #     print(e)
-
-    #     raise HTTPException(status_code=500, detail=f"Failed to initialize LLM core: {str(e)}")
-    llm = useCore(
-        llm_name=config.llm_name,
-        max_gpu_memory=config.max_gpu_memory,
-        eval_device=config.eval_device,
-        max_new_tokens=config.max_new_tokens,
-        log_mode=config.log_mode,
-        use_backend=config.use_backend,
-    )
-    active_components["llm"] = llm
-    return {"status": "success", "message": "LLM core initialized"}
+    try:
+        llm = useCore(
+            llm_name=config.llm_name,
+            max_gpu_memory=config.max_gpu_memory,
+            eval_device=config.eval_device,
+            max_new_tokens=config.max_new_tokens,
+            log_mode=config.log_mode,
+            use_backend=config.use_backend,
+        )
+        active_components["llm"] = llm
+        return {"status": "success", "message": "LLM core initialized"}
+    except Exception as e:
+        print(e)
+
+        raise HTTPException(status_code=500, detail=f"Failed to initialize LLM core: {str(e)}")
 
 
 @app.post("/core/storage/setup")
 async def setup_storage(config: StorageConfig):
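Note: the endpoint behavior is unchanged apart from the restored try/except around useCore. A quick way to exercise it once the kernel is running; this sketch assumes a local server on port 8000, and the model id is illustrative:

import requests  # third-party HTTP client, assumed installed

resp = requests.post(
    "http://localhost:8000/core/llm/setup",
    json={
        "llm_name": "gpt-4o-mini",  # illustrative; any configured backend id
        "max_new_tokens": 2048,
        "log_mode": "INFO",
    },
)
print(resp.status_code, resp.json())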
@@ -204,7 +209,6 @@ async def setup_scheduler(config: SchedulerConfig):
             get_memory_syscall=None,
             get_storage_syscall=None,
             get_tool_syscall=None,
-            # **(config.custom_syscalls or {})
         )
 
         active_components["scheduler"] = scheduler
@@ -217,7 +221,6 @@ async def setup_scheduler(config: SchedulerConfig):
             status_code=500, detail=f"Failed to initialize scheduler: {str(e)}"
         )
 
-
 @app.get("/core/status")
 async def get_status():
     """Get the status of all core components."""
@@ -226,12 +229,6 @@ async def get_status():
         for component, instance in active_components.items()
     }
 
-
-class AgentSubmit(BaseModel):
-    agent_id: str
-    agent_config: Dict[str, Any]
-
-
 @app.post("/agents/submit")
 async def submit_agent(config: AgentSubmit):
     """Submit an agent for execution using the agent factory."""
@@ -293,14 +290,9 @@ async def cleanup_components():
     )
 
 
-class QueryRequest(BaseModel):
-    agent_name: str
-    query_type: Literal["llm", "tool", "storage", "memory"]
-    query_data: LLMQuery
-
-
 @app.post("/query")
 async def handle_query(request: QueryRequest):
+    print('received')
     try:
         if request.query_type == "llm":
             query = LLMQuery(
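Note: for reference, a call against the reworked /query route. The query_data shape is an assumption based on the LLMQuery constructor call above, and the agent name is illustrative:

import requests

resp = requests.post(
    "http://localhost:8000/query",
    json={
        "agent_name": "example/demo_agent",
        "query_type": "llm",
        "query_data": {"messages": [{"role": "user", "content": "Hello"}]},
    },
)
print(resp.json())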
@@ -313,6 +305,3 @@ async def handle_query(request: QueryRequest):
     except Exception as e:
         raise HTTPException(status_code=500, detail=str(e))
 
-
-if __name__ == "__main__":
-    uvicorn.run(app, host="0.0.0.0", port=8000)
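Note: with the uvicorn import and the __main__ block removed, the kernel no longer starts itself; presumably it is now launched externally against the new module path, for example:

uvicorn runtime.kernel:app --host 0.0.0.0 --port 8000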
tool_loader.py (6 changes: 0 additions & 6 deletions)

This file was deleted.
