diff --git a/agents/prompt_chaining.py b/agents/prompt_chaining.py
new file mode 100644
index 00000000..185ee30d
--- /dev/null
+++ b/agents/prompt_chaining.py
@@ -0,0 +1,91 @@
+from praisonaiagents.agent import Agent
+from praisonaiagents.task import Task
+from praisonaiagents.agents import PraisonAIAgents
+import time
+
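+# Tool whose "even"/"odd" return value drives the decision task's routing below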
+def get_time_check():
+ current_time = int(time.time())
+ result = "even" if current_time % 2 == 0 else "odd"
+ print(f"Time check: {current_time} is {result}")
+ return result
+
+def create_prompt_chain():
+ # Create agents for each step in the chain
+ agent1 = Agent(
+ name="Time Checker",
+ role="Time checker",
+ goal="Check if the time is even or odd",
+ instructions="Check if the time is even or odd",
+ tools=[get_time_check]
+ )
+
+ agent2 = Agent(
+ name="Advanced Analyzer",
+ role="Advanced data analyzer",
+ goal="Perform in-depth analysis of processed data",
+ instructions="Analyze the processed data in detail"
+ )
+
+ agent3 = Agent(
+ name="Final Processor",
+ role="Final data processor",
+ goal="Generate final output based on analysis",
+ instructions="Create final output based on analyzed data"
+ )
+
+ # Create tasks for each step
+ initial_task = Task(
+ name="time_check",
+ description="Getting time check and checking if it is even or odd",
+ expected_output="Getting time check and checking if it is even or odd",
+ agent=agent1,
+ is_start=True, # Mark as the starting task
+ task_type="decision", # This task will make a decision
+ next_tasks=["advanced_analysis"], # Next task if condition passes
+ condition={
+ "even": ["advanced_analysis"], # If passes, go to advanced analysis
+ "odd": ["final_processing"] # If fails, exit the chain
+ }
+ )
+
+ analysis_task = Task(
+ name="advanced_analysis",
+ description="Perform advanced analysis on the processed data",
+ expected_output="Analyzed data ready for final processing",
+ agent=agent2,
+ next_tasks=["final_processing"]
+ )
+
+ final_task = Task(
+ name="final_processing",
+ description="Generate final output",
+ expected_output="Final processed result",
+ agent=agent3
+ )
+
+ # Create the workflow manager
+ workflow = PraisonAIAgents(
+ agents=[agent1, agent2, agent3],
+ tasks=[initial_task, analysis_task, final_task],
+ process="workflow", # Use workflow process type
+ verbose=True
+ )
+
+ return workflow
+
+def main():
+ # Create and run the prompt chain
+ workflow = create_prompt_chain()
+
+ # Run the workflow
+ results = workflow.start()
+
+ # Print results
+ print("\nWorkflow Results:")
+ for task_id, result in results["task_results"].items():
+ if result:
+ print(f"Task {task_id}: {result.raw}")
+
+if __name__ == "__main__":
+ main()
diff --git a/agents/routing copy.py b/agents/routing copy.py
new file mode 100644
index 00000000..9cc3befc
--- /dev/null
+++ b/agents/routing copy.py
@@ -0,0 +1,85 @@
+from praisonaiagents.agent import Agent
+from praisonaiagents.task import Task
+from praisonaiagents.agents import PraisonAIAgents
+import time
+
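+# Tool the router calls; the router agent maps its "even"/"odd" result to pass or fail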
+def get_time_check():
+ current_time = int(time.time())
+ result = "even" if current_time % 2 == 0 else "odd"
+ print(f"Time check: {current_time} is {result}")
+ return result
+
+# Create specialized agents
+router = Agent(
+ name="Router",
+ role="Input Router",
+ goal="Evaluate input and determine routing path",
+ instructions="Analyze input and decide whether to proceed or exit",
+ tools=[get_time_check]
+)
+
+processor1 = Agent(
+ name="Processor 1",
+ role="Secondary Processor",
+ goal="Process valid inputs that passed initial check",
+ instructions="Process data that passed the routing check"
+)
+
+processor2 = Agent(
+ name="Processor 2",
+ role="Final Processor",
+ goal="Perform final processing on validated data",
+ instructions="Generate final output for processed data"
+)
+
+# Create tasks with routing logic
+routing_task = Task(
+ name="initial_routing",
+ description="check the time and return according to what is returned",
+ expected_output="pass or fail based on what is returned",
+ agent=router,
+ is_start=True,
+ task_type="decision",
+ condition={
+ "pass": ["process_valid"],
+ "fail": "exit"
+ }
+)
+
+processing_task = Task(
+ name="process_valid",
+ description="Process validated input",
+ expected_output="Processed data ready for final step",
+ agent=processor1,
+ next_tasks=["final_process"]
+)
+
+final_task = Task(
+ name="final_process",
+ description="Generate final output",
+ expected_output="Final processed result",
+ agent=processor2
+)
+
+# Create and run workflow
+workflow = PraisonAIAgents(
+ agents=[router, processor1, processor2],
+ tasks=[routing_task, processing_task, final_task],
+ process="workflow",
+ verbose=True
+)
+
+print("\nStarting Routing Workflow...")
+print("=" * 50)
+
+results = workflow.start()
+
+print("\nWorkflow Results:")
+print("=" * 50)
+for task_id, result in results["task_results"].items():
+ if result:
+ task_name = result.description
+ print(f"\nTask: {task_name}")
+ print(f"Result: {result.raw}")
+ print("-" * 50)
diff --git a/agents/routing.py b/agents/routing.py
new file mode 100644
index 00000000..06a5299b
--- /dev/null
+++ b/agents/routing.py
@@ -0,0 +1,84 @@
+from praisonaiagents.agent import Agent
+from praisonaiagents.task import Task
+from praisonaiagents.agents import PraisonAIAgents
+import time
+
+def get_time_check():
+ current_time = int(time.time())
+ result = "even" if current_time % 2 == 0 else "odd"
+ print(f"Time check: {current_time} is {result}")
+ return result
+
+# Create specialized agents
+router = Agent(
+ name="Router",
+ role="Input Router",
+ goal="Evaluate input and determine routing path",
+ instructions="Analyze input and decide whether to proceed or exit",
+ tools=[get_time_check]
+)
+
+processor1 = Agent(
+ name="Processor 1",
+ role="Secondary Processor",
+ goal="Process valid inputs that passed initial check",
+ instructions="Process data that passed the routing check"
+)
+
+processor2 = Agent(
+ name="Processor 2",
+ role="Final Processor",
+ goal="Perform final processing on validated data",
+ instructions="Generate final output for processed data"
+)
+
+# Create tasks with routing logic
+routing_task = Task(
+ name="initial_routing",
+ description="check the time and return according to what is returned",
+ expected_output="pass or fail based on what is returned",
+ agent=router,
+ is_start=True,
+ task_type="decision",
+ condition={
+ "pass": ["process_valid"],
+ "fail": ["process_invalid"]
+ }
+)
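+# Note: neither downstream task defines next_tasks, so each branch ends the workflow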
+
+processing_task = Task(
+ name="process_valid",
+ description="Process validated input",
+ expected_output="Processed data ready for final step",
+ agent=processor1,
+)
+
+final_task = Task(
+ name="process_invalid",
+ description="Generate final output",
+ expected_output="Final processed result",
+ agent=processor2
+)
+
+# Create and run workflow
+workflow = PraisonAIAgents(
+ agents=[router, processor1, processor2],
+ tasks=[routing_task, processing_task, final_task],
+ process="workflow",
+ verbose=True
+)
+
+print("\nStarting Routing Workflow...")
+print("=" * 50)
+
+results = workflow.start()
+
+print("\nWorkflow Results:")
+print("=" * 50)
+for task_id, result in results["task_results"].items():
+ if result:
+ task_name = result.description
+ print(f"\nTask: {task_name}")
+ print(f"Result: {result.raw}")
+ print("-" * 50)
diff --git a/cookbooks/general/async_example.py b/cookbooks/general/async_example.py
new file mode 100644
index 00000000..fe3b0388
--- /dev/null
+++ b/cookbooks/general/async_example.py
@@ -0,0 +1,234 @@
+import asyncio
+import time
+from typing import List, Dict
+from praisonaiagents import Agent, Task, PraisonAIAgents, TaskOutput
+from praisonaiagents.main import (
+ display_error,
+ display_interaction,
+ display_tool_call,
+ display_instruction,
+ error_logs,
+ Console
+)
+from duckduckgo_search import DDGS
+from pydantic import BaseModel
+
+console = Console()
+
+# 1. Define output model for structured results
+class SearchResult(BaseModel):
+ query: str
+ results: List[Dict[str, str]]
+ total_results: int
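+ # Tasks below parse agent output into this model via output_pydantic=SearchResult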
+
+# 2. Define both sync and async tools
+def sync_search_tool(query: str) -> List[Dict]:
+ """
+ Synchronous search using DuckDuckGo.
+ Args:
+ query (str): The search query.
+ Returns:
+ list: Search results
+ """
+ display_tool_call(f"Running sync search for: {query}", console)
+ time.sleep(1) # Simulate network delay
+ try:
+ results = []
+ ddgs = DDGS()
+ for result in ddgs.text(keywords=query, max_results=5):
+ results.append({
+ "title": result.get("title", ""),
+ "url": result.get("href", ""),
+ "snippet": result.get("body", "")
+ })
+ return results
+ except Exception as e:
+ error_msg = f"Error during sync search: {e}"
+ display_error(error_msg, console)
+ error_logs.append(error_msg)
+ return []
+
+async def async_search_tool(query: str) -> List[Dict]:
+ """
+ Asynchronous search using DuckDuckGo.
+ Args:
+ query (str): The search query.
+ Returns:
+ list: Search results
+ """
+ display_tool_call(f"Running async search for: {query}", console)
+ await asyncio.sleep(1) # Simulate network delay
+ try:
+ results = []
+ ddgs = DDGS()
+ for result in ddgs.text(keywords=query, max_results=5):
+ results.append({
+ "title": result.get("title", ""),
+ "url": result.get("href", ""),
+ "snippet": result.get("body", "")
+ })
+ return results
+ except Exception as e:
+ error_msg = f"Error during async search: {e}"
+ display_error(error_msg, console)
+ error_logs.append(error_msg)
+ return []
+
+# 3. Define both sync and async callbacks
+def sync_callback(output: TaskOutput):
+ display_interaction("Sync Callback", f"Processing output: {output.raw[:100]}...", markdown=True, console=console)
+ time.sleep(1) # Simulate processing
+ if output.output_format == "JSON":
+ display_tool_call(f"Processed JSON result: {output.json_dict}", console)
+ elif output.output_format == "Pydantic":
+ display_tool_call(f"Processed Pydantic result: {output.pydantic}", console)
+
+async def async_callback(output: TaskOutput):
+ display_interaction("Async Callback", f"Processing output: {output.raw[:100]}...", markdown=True, console=console)
+ await asyncio.sleep(1) # Simulate processing
+ if output.output_format == "JSON":
+ display_tool_call(f"Processed JSON result: {output.json_dict}", console)
+ elif output.output_format == "Pydantic":
+ display_tool_call(f"Processed Pydantic result: {output.pydantic}", console)
+
+# 4. Create agents with different tools
+sync_agent = Agent(
+ name="SyncAgent",
+ role="Synchronous Search Specialist",
+ goal="Perform synchronous searches and return structured results",
+ backstory="Expert in sync operations and data organization",
+ tools=[sync_search_tool],
+ self_reflect=False,
+ verbose=True,
+ markdown=True
+)
+
+async_agent = Agent(
+ name="AsyncAgent",
+ role="Asynchronous Search Specialist",
+ goal="Perform asynchronous searches and return structured results",
+ backstory="Expert in async operations and data organization",
+ tools=[async_search_tool],
+ self_reflect=False,
+ verbose=True,
+ markdown=True
+)
+
+# 5. Create tasks with different configurations
+sync_task = Task(
+ name="sync_search",
+ description="Search for 'Python programming' using sync tool and return structured results",
+ expected_output="SearchResult model with query details and results",
+ agent=sync_agent,
+ async_execution=False,
+ callback=sync_callback,
+ output_pydantic=SearchResult
+)
+
+async_task = Task(
+ name="async_search",
+ description="Search for 'Async programming' using async tool and return structured results",
+ expected_output="SearchResult model with query details and results",
+ agent=async_agent,
+ async_execution=True,
+ callback=async_callback,
+ output_pydantic=SearchResult
+)
+
+# 6. Create workflow tasks
+workflow_sync_task = Task(
+ name="workflow_sync",
+ description="Workflow sync search for 'AI trends' with structured output",
+ expected_output="SearchResult model with AI trends data",
+ agent=sync_agent,
+ async_execution=False,
+ is_start=True,
+ next_tasks=["workflow_async"],
+ output_pydantic=SearchResult
+)
+
+workflow_async_task = Task(
+ name="workflow_async",
+ description="Workflow async search for 'Future of AI' with structured output",
+ expected_output="SearchResult model with Future of AI data",
+ agent=async_agent,
+ async_execution=True,
+ output_pydantic=SearchResult
+)
+
+# 7. Example usage functions
+def run_sync_example():
+ """Run synchronous example"""
+ display_instruction("\nRunning Synchronous Example...", console)
+ agents = PraisonAIAgents(
+ agents=[sync_agent],
+ tasks=[sync_task],
+ verbose=1,
+ process="sequential"
+ )
+ result = agents.start()
+ display_interaction("Sync Example", f"Result: {result}", markdown=True, console=console)
+
+async def run_async_example():
+ """Run asynchronous example"""
+ display_instruction("\nRunning Asynchronous Example...", console)
+ agents = PraisonAIAgents(
+ agents=[async_agent],
+ tasks=[async_task],
+ verbose=1,
+ process="sequential"
+ )
+ result = await agents.astart()
+ display_interaction("Async Example", f"Result: {result}", markdown=True, console=console)
+
+async def run_mixed_example():
+ """Run mixed sync/async example"""
+ display_instruction("\nRunning Mixed Sync/Async Example...", console)
+ agents = PraisonAIAgents(
+ agents=[sync_agent, async_agent],
+ tasks=[sync_task, async_task],
+ verbose=1,
+ process="sequential"
+ )
+ result = await agents.astart()
+ display_interaction("Mixed Example", f"Result: {result}", markdown=True, console=console)
+
+async def run_workflow_example():
+ """Run workflow example with both sync and async tasks"""
+ display_instruction("\nRunning Workflow Example...", console)
+ agents = PraisonAIAgents(
+ agents=[sync_agent, async_agent],
+ tasks=[workflow_sync_task, workflow_async_task],
+ verbose=1,
+ process="workflow"
+ )
+ result = await agents.astart()
+ display_interaction("Workflow Example", f"Result: {result}", markdown=True, console=console)
+
+# 8. Main execution
+async def main():
+ """Main execution function"""
+ display_instruction("Starting PraisonAI Agents Examples...", console)
+
+ try:
+ # Run sync example in a separate thread to not block the event loop
+ loop = asyncio.get_running_loop() # get_event_loop() is deprecated inside coroutines
+ await loop.run_in_executor(None, run_sync_example)
+
+ # Run async examples
+ await run_async_example()
+ await run_mixed_example()
+ await run_workflow_example()
+
+ if error_logs:
+ display_error("\nErrors encountered during execution:", console)
+ for error in error_logs:
+ display_error(error, console)
+ except Exception as e:
+ display_error(f"Error in main execution: {e}", console)
+ error_logs.append(str(e))
+
+if __name__ == "__main__":
+ # Run the main function
+ asyncio.run(main())
\ No newline at end of file
diff --git a/cookbooks/general/async_example_full.py b/cookbooks/general/async_example_full.py
new file mode 100644
index 00000000..22903b12
--- /dev/null
+++ b/cookbooks/general/async_example_full.py
@@ -0,0 +1,152 @@
+import asyncio
+from typing import List, Dict
+from praisonaiagents import Agent, Task, PraisonAIAgents, TaskOutput
+from duckduckgo_search import DDGS
+from pydantic import BaseModel
+
+# 1. Define output model for structured results
+class SearchResult(BaseModel):
+ query: str
+ results: List[Dict[str, str]]
+ total_results: int
+
+# 2. Define async tool
+async def async_search_tool(query: str) -> Dict:
+ """Perform asynchronous search and return structured results."""
+ await asyncio.sleep(1) # Simulate network delay
+ try:
+ results = []
+ ddgs = DDGS()
+ for result in ddgs.text(keywords=query, max_results=5):
+ results.append({
+ "title": result.get("title", ""),
+ "url": result.get("href", ""),
+ "snippet": result.get("body", "")
+ })
+
+ return {
+ "query": query,
+ "results": results,
+ "total_results": len(results)
+ }
+ except Exception as e:
+ print(f"Error during async search: {e}")
+ return {
+ "query": query,
+ "results": [],
+ "total_results": 0
+ }
+
+# 3. Define async callback
+async def async_callback(output: TaskOutput):
+ await asyncio.sleep(1) # Simulate processing
+ if output.output_format == "JSON":
+ print(f"Processed JSON result: {output.json_dict}")
+ elif output.output_format == "Pydantic":
+ print(f"Processed Pydantic result: {output.pydantic}")
+
+# 4. Create specialized agents
+async_agent = Agent(
+ name="AsyncSearchAgent",
+ role="Search Specialist",
+ goal="Perform fast parallel searches with structured results",
+ backstory="Expert in efficient data retrieval and parallel search operations",
+ tools=[async_search_tool],
+ self_reflect=False,
+ verbose=True,
+ markdown=True
+)
+
+summary_agent = Agent(
+ name="SummaryAgent",
+ role="Research Synthesizer",
+ goal="Create concise summaries from multiple search results",
+ backstory="Expert in analyzing and synthesizing information from multiple sources",
+ self_reflect=True,
+ verbose=True,
+ markdown=True
+)
+
+# 5. Create async tasks
+async_task = Task(
+ name="async_search",
+ description="Search for 'Async programming' and return results in JSON format with query, results array, and total_results count.",
+ expected_output="SearchResult model with structured data",
+ agent=async_agent,
+ async_execution=True,
+ callback=async_callback,
+ output_json=SearchResult
+)
+
+async def run_parallel_tasks():
+ """Run multiple async tasks in parallel"""
+ print("\nRunning Parallel Async Tasks...")
+
+ # Define different search topics
+ search_topics = [
+ "Latest AI Developments 2024",
+ "Machine Learning Best Practices",
+ "Neural Networks Architecture"
+ ]
+
+ # Create tasks for different topics
+ parallel_tasks = [
+ Task(
+ name=f"search_task_{i}",
+ description=f"Search for '{topic}' and return structured results with query details and findings.",
+ expected_output="SearchResult model with search data",
+ agent=async_agent,
+ async_execution=True,
+ callback=async_callback,
+ output_json=SearchResult
+ ) for i, topic in enumerate(search_topics)
+ ]
+
+ # Create summarization task
+ summary_task = Task(
+ name="summary_task",
+ description="Analyze all search results and create a concise summary highlighting key findings, patterns, and implications.",
+ expected_output="Structured summary with key findings and insights",
+ agent=summary_agent,
+ async_execution=False,
+ callback=async_callback,
+ context=parallel_tasks
+ )
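+ # context=parallel_tasks feeds each search task's output into the summary prompt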
+
+ # Create a single PraisonAIAgents instance with both agents
+ agents = PraisonAIAgents(
+ agents=[async_agent, summary_agent],
+ tasks=parallel_tasks + [summary_task],
+ verbose=1,
+ process="sequential"
+ )
+
+ # Run all tasks
+ results = await agents.astart()
+ print(f"Tasks Results: {results}")
+
+ # Return results in a serializable format
+ return {
+ "search_results": {
+ "task_status": {k: v for k, v in results["task_status"].items() if k != summary_task.id},
+ "task_results": [str(results["task_results"][i]) if results["task_results"][i] else None
+ for i in range(len(parallel_tasks))]
+ },
+ "summary": str(results["task_results"][summary_task.id]) if results["task_results"].get(summary_task.id) else None,
+ "topics": search_topics
+ }
+
+# 6. Main execution
+async def main():
+ """Main execution function"""
+ print("Starting Async AI Agents Examples...")
+
+ try:
+ await run_parallel_tasks()
+ except Exception as e:
+ print(f"Error in main execution: {e}")
+
+if __name__ == "__main__":
+ # Run the main function
+ asyncio.run(main())
diff --git a/cookbooks/general/async_example_full_multigroups.py b/cookbooks/general/async_example_full_multigroups.py
new file mode 100644
index 00000000..5d4275c3
--- /dev/null
+++ b/cookbooks/general/async_example_full_multigroups.py
@@ -0,0 +1,248 @@
+import asyncio
+from typing import List, Dict
+from praisonaiagents import Agent, Task, PraisonAIAgents, TaskOutput
+from duckduckgo_search import DDGS
+from pydantic import BaseModel
+
+# 1. Define output model for structured results
+class SearchResult(BaseModel):
+ query: str
+ results: List[Dict[str, str]]
+ total_results: int
+
+# 2. Define async tool
+async def async_search_tool(query: str) -> Dict:
+ """
+ Asynchronous search using DuckDuckGo.
+ Args:
+ query (str): The search query.
+ Returns:
+ dict: Search results in SearchResult model format
+ """
+ await asyncio.sleep(1) # Simulate network delay
+ try:
+ results = []
+ ddgs = DDGS()
+ for result in ddgs.text(keywords=query, max_results=5):
+ results.append({
+ "title": result.get("title", ""),
+ "url": result.get("href", ""),
+ "snippet": result.get("body", "")
+ })
+
+ # Format response to match SearchResult model
+ return {
+ "query": query,
+ "results": results,
+ "total_results": len(results)
+ }
+ except Exception as e:
+ print(f"Error during async search: {e}")
+ return {
+ "query": query,
+ "results": [],
+ "total_results": 0
+ }
+
+# 3. Define async callback
+async def async_callback(output: TaskOutput):
+ await asyncio.sleep(1) # Simulate processing
+ if output.output_format == "JSON":
+ print(f"Processed JSON result: {output.json_dict}")
+ elif output.output_format == "Pydantic":
+ print(f"Processed Pydantic result: {output.pydantic}")
+
+# 4. Create specialized agents
+async_agent = Agent(
+ name="AsyncSearchAgent",
+ role="Asynchronous Search Specialist",
+ goal="Perform fast and efficient asynchronous searches with structured results",
+ backstory="Expert in parallel search operations and data retrieval",
+ tools=[async_search_tool],
+ self_reflect=False,
+ verbose=True,
+ markdown=True
+)
+
+summary_agent = Agent(
+ name="SummaryAgent",
+ role="Research Synthesizer",
+ goal="Create comprehensive summaries and identify patterns across multiple search results",
+ backstory="""Expert in analyzing and synthesizing information from multiple sources.
+Skilled at identifying patterns, trends, and connections between different topics.
+Specializes in creating clear, structured summaries that highlight key insights.""",
+ self_reflect=True, # Enable self-reflection for better summary quality
+ verbose=True,
+ markdown=True
+)
+
+# 5. Create async tasks
+async_task = Task(
+ name="async_search",
+ description="""Search for 'Async programming' and return results in the following JSON format:
+{
+ "query": "the search query",
+ "results": [
+ {
+ "title": "result title",
+ "url": "result url",
+ "snippet": "result snippet"
+ }
+ ],
+ "total_results": number of results
+}""",
+ expected_output="SearchResult model with query details and results",
+ agent=async_agent,
+ async_execution=True,
+ callback=async_callback,
+ output_json=SearchResult
+)
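+# output_json=SearchResult asks the agent to reply with JSON matching the model above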
+
+# 6. Example usage functions
+async def run_single_task():
+ """Run single async task"""
+ print("\nRunning Single Async Task...")
+ agents = PraisonAIAgents(
+ agents=[async_agent],
+ tasks=[async_task],
+ verbose=1,
+ process="sequential"
+ )
+ result = await agents.astart()
+ print(f"Single Task Result: {result}")
+
+async def run_parallel_tasks():
+ """Run multiple async tasks in parallel"""
+ print("\nRunning Parallel Async Tasks...")
+
+ # Define different search topics
+ search_topics = [
+ "Latest AI Developments 2024",
+ "Machine Learning Best Practices",
+ "Neural Networks Architecture"
+ ]
+
+ # Create tasks for different topics
+ parallel_tasks = [
+ Task(
+ name=f"search_task_{i}",
+ description=f"""Search for '{topic}' and return results in the following JSON format:
+{{
+ "query": "{topic}",
+ "results": [
+ {{
+ "title": "result title",
+ "url": "result url",
+ "snippet": "result snippet"
+ }}
+ ],
+ "total_results": number of results
+}}""",
+ expected_output="SearchResult model with detailed information",
+ agent=async_agent,
+ async_execution=True,
+ callback=async_callback,
+ output_json=SearchResult
+ ) for i, topic in enumerate(search_topics)
+ ]
+
+ # Create summarization task with the specialized summary agent
+ summary_task = Task(
+ name="summary_task",
+ description="""As a Research Synthesizer, analyze the search results and create a comprehensive summary. Your task:
+
+1. Analyze Results:
+ - Review all search results thoroughly
+ - Extract key findings from each topic
+ - Identify main themes and concepts
+
+2. Find Connections:
+ - Identify relationships between topics
+ - Spot common patterns or contradictions
+ - Note emerging trends across sources
+
+3. Create Structured Summary:
+ - Main findings per topic
+ - Cross-cutting themes
+ - Emerging trends
+ - Practical implications
+ - Future directions
+
+4. Quality Checks:
+ - Ensure all topics are covered
+ - Verify accuracy of connections
+ - Confirm clarity of insights
+ - Validate practical relevance
+
+Present the summary in a clear, structured format with sections for findings, patterns, trends, and implications.""",
+ expected_output="""A comprehensive research synthesis containing:
+- Detailed findings from each search topic
+- Cross-topic patterns and relationships
+- Emerging trends and their implications
+- Practical applications and future directions""",
+ agent=summary_agent, # Use the specialized summary agent
+ async_execution=False, # Run synchronously after search tasks
+ callback=async_callback
+ )
+
+ # First run parallel search tasks
+ agents = PraisonAIAgents(
+ agents=[async_agent],
+ tasks=parallel_tasks, # Only run search tasks first
+ verbose=1,
+ process="sequential"
+ )
+ search_results = await agents.astart()
+ print(f"Search Tasks Results: {search_results}")
+
+ # Create task objects with results for context
+ completed_tasks = []
+ for i, topic in enumerate(search_topics):
+ task = Task(
+ name=f"search_task_{i}_result",
+ description=f"Search results for: {topic}",
+ expected_output="Search results from previous task",
+ agent=async_agent,
+ result=search_results["task_results"][i]
+ )
+ completed_tasks.append(task)
+
+ # Update summary task with context from search results
+ summary_task.context = completed_tasks
+
+ # Run summarization task with summary agent
+ summary_agents = PraisonAIAgents(
+ agents=[summary_agent], # Use summary agent for synthesis
+ tasks=[summary_task],
+ verbose=1,
+ process="sequential"
+ )
+ summary_result = await summary_agents.astart()
+ print(f"Summary Task Result: {summary_result}")
+
+ # Return results in a serializable format
+ return {
+ "search_results": {
+ "task_status": search_results["task_status"],
+ "task_results": [str(result) if result else None for result in search_results["task_results"]]
+ },
+ "summary": str(summary_result),
+ "topics": search_topics
+ }
+
+# 7. Main execution
+async def main():
+ """Main execution function"""
+ print("Starting Async AI Agents Examples...")
+
+ try:
+ # Run different async patterns
+ await run_single_task()
+ await run_parallel_tasks()
+ except Exception as e:
+ print(f"Error in main execution: {e}")
+
+if __name__ == "__main__":
+ # Run the main function
+ asyncio.run(main())
diff --git a/cookbooks/general/auto_agents_example.py b/cookbooks/general/auto_agents_example.py
new file mode 100644
index 00000000..bdf7d5f7
--- /dev/null
+++ b/cookbooks/general/auto_agents_example.py
@@ -0,0 +1,12 @@
+from praisonaiagents import AutoAgents
+from praisonaiagents.tools import duckduckgo
+
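+# AutoAgents derives the agents and tasks automatically from the instructions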
+agents = AutoAgents(
+ instructions="Search for information about AI Agents",
+ tools=[duckduckgo],
+ process="sequential",
+ verbose=True
+)
+
+agents.start()
\ No newline at end of file
diff --git a/cookbooks/general/code_agents_example.py b/cookbooks/general/code_agents_example.py
new file mode 100644
index 00000000..dfac1f64
--- /dev/null
+++ b/cookbooks/general/code_agents_example.py
@@ -0,0 +1,51 @@
+from praisonaiagents import Agent, Task, PraisonAIAgents
+import json
+from e2b_code_interpreter import Sandbox
+
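+# Note: Sandbox() expects an E2B API key (E2B_API_KEY) in the environment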
+def code_interpreter(code: str):
+ """
+ A function to demonstrate running Python code dynamically using e2b_code_interpreter.
+ """
+ print(f"\n{'='*50}\n> Running following AI-generated code:\n{code}\n{'='*50}")
+ exec_result = Sandbox().run_code(code)
+ if exec_result.error:
+ print("[Code Interpreter error]", exec_result.error)
+ return {"error": str(exec_result.error)}
+ else:
+ results = []
+ for result in exec_result.results:
+ if hasattr(result, '__iter__'):
+ results.extend(list(result))
+ else:
+ results.append(str(result))
+ logs = {"stdout": list(exec_result.logs.stdout), "stderr": list(exec_result.logs.stderr)}
+ return json.dumps({"results": results, "logs": logs})
+
+code_agent = Agent(
+ name="code_agent",
+ llm="gpt-4o-mini",
+ backstory="Expert in writing Python scripts",
+ self_reflect=False
+)
+execution_agent = Agent(
+ name="execution_agent",
+ llm="gpt-4o-mini",
+ backstory="Expert in executing Python scripts",
+ self_reflect=False,
+ tools=[code_interpreter]
+)
+
+code_agent_task = Task(
+ description="Write a simple Python script to print 'Hello, World!'",
+ expected_output="A Python script that prints 'Hello, World!'",
+ agent=code_agent
+)
+execution_agent_task = Task(
+ description="Execute the Python script",
+ expected_output="The output of the Python script",
+ agent=execution_agent
+)
+
+agents = PraisonAIAgents(agents=[code_agent, execution_agent], tasks=[code_agent_task, execution_agent_task])
+agents.start()
\ No newline at end of file
diff --git a/cookbooks/general/example_callback.py b/cookbooks/general/example_callback.py
new file mode 100644
index 00000000..f8775872
--- /dev/null
+++ b/cookbooks/general/example_callback.py
@@ -0,0 +1,205 @@
+from praisonaiagents import (
+ Agent,
+ Task,
+ PraisonAIAgents,
+ error_logs,
+ register_display_callback,
+ sync_display_callbacks,
+ async_display_callbacks
+)
+from duckduckgo_search import DDGS
+from rich.console import Console
+import json
+from datetime import datetime
+import logging
+
+# Setup logging
+logging.basicConfig(
+ filename='ai_interactions.log',
+ level=logging.INFO,
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
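+# The registered callbacks below write to ai_interactions.log via this logger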
+
+# Callback functions for different display types
+def interaction_callback(message=None, response=None, markdown=None, generation_time=None):
+ """Callback for display_interaction"""
+ logging.info(f"""
+ === INTERACTION ===
+ Time: {datetime.now()}
+ Generation Time: {generation_time}s
+ Message: {message}
+ Response: {response}
+ Markdown: {markdown}
+ """)
+
+def error_callback(message=None):
+ """Callback for display_error"""
+ logging.error(f"""
+ === ERROR ===
+ Time: {datetime.now()}
+ Message: {message}
+ """)
+
+def tool_call_callback(message=None):
+ """Callback for display_tool_call"""
+ logging.info(f"""
+ === TOOL CALL ===
+ Time: {datetime.now()}
+ Message: {message}
+ """)
+
+def instruction_callback(message=None):
+ """Callback for display_instruction"""
+ logging.info(f"""
+ === INSTRUCTION ===
+ Time: {datetime.now()}
+ Message: {message}
+ """)
+
+def self_reflection_callback(message=None):
+ """Callback for display_self_reflection"""
+ logging.info(f"""
+ === SELF REFLECTION ===
+ Time: {datetime.now()}
+ Message: {message}
+ """)
+
+def generating_callback(content=None, elapsed_time=None):
+ """Callback for display_generating"""
+ logging.info(f"""
+ === GENERATING ===
+ Time: {datetime.now()}
+ Content: {content}
+ Elapsed Time: {elapsed_time}
+ """)
+
+# Register all callbacks
+register_display_callback('interaction', interaction_callback)
+register_display_callback('error', error_callback)
+register_display_callback('tool_call', tool_call_callback)
+register_display_callback('instruction', instruction_callback)
+register_display_callback('self_reflection', self_reflection_callback)
+# register_display_callback('generating', generating_callback)
+
+def task_callback(output):
+ """Callback for task completion"""
+ logging.info(f"""
+ === TASK COMPLETED ===
+ Time: {datetime.now()}
+ Description: {output.description}
+ Agent: {output.agent}
+ Output: {output.raw[:200]}...
+ """)
+
+def internet_search_tool(query) -> list:
+ """
+ Perform a search using DuckDuckGo.
+
+ Args:
+ query (str): The search query.
+
+ Returns:
+ list: A list of search result titles and URLs.
+ """
+ try:
+ results = []
+ ddgs = DDGS()
+ for result in ddgs.text(keywords=query, max_results=10):
+ results.append({
+ "title": result.get("title", ""),
+ "url": result.get("href", ""),
+ "snippet": result.get("body", "")
+ })
+ return results
+
+ except Exception as e:
+ print(f"Error during DuckDuckGo search: {e}")
+ return []
+
+def main():
+ # Create agents
+ researcher = Agent(
+ name="Researcher",
+ role="Senior Research Analyst",
+ goal="Uncover cutting-edge developments in AI and data science",
+ backstory="""You are an expert at a technology research group,
+ skilled in identifying trends and analyzing complex data.""",
+ verbose=True,
+ allow_delegation=False,
+ tools=[internet_search_tool],
+ llm="gpt-4o",
+ markdown=True,
+ reflect_llm="gpt-4o",
+ min_reflect=2,
+ max_reflect=4
+ )
+
+ writer = Agent(
+ name="Writer",
+ role="Tech Content Strategist",
+ goal="Craft compelling content on tech advancements",
+ backstory="""You are a content strategist known for
+ making complex tech topics interesting and easy to understand.""",
+ verbose=True,
+ allow_delegation=True,
+ llm="gpt-4o",
+ tools=[],
+ markdown=True
+ )
+
+ # Create tasks with callbacks
+ task1 = Task(
+ name="research_task",
+ description="""Analyze 2024's AI advancements.
+ Find major trends, new technologies, and their effects.""",
+ expected_output="""A detailed report on 2024 AI advancements""",
+ agent=researcher,
+ tools=[internet_search_tool],
+ callback=task_callback
+ )
+
+ task2 = Task(
+ name="writing_task",
+ description="""Create a blog post about major AI advancements using the insights you have.
+ Make it interesting, clear, and suited for tech enthusiasts.
+ It should be at least 4 paragraphs long.""",
+ expected_output="A blog post of at least 4 paragraphs",
+ agent=writer,
+ context=[task1],
+ callback=task_callback,
+ tools=[]
+ )
+
+ task3 = Task(
+ name="json_task",
+ description="""Create a json object with a title of "My Task" and content of "My content".""",
+ expected_output="""JSON output with title and content""",
+ agent=researcher,
+ callback=task_callback
+ )
+
+ task4 = Task(
+ name="save_output_task",
+ description="""Save the AI blog post to a file""",
+ expected_output="""File saved successfully""",
+ agent=writer,
+ context=[task2],
+ output_file='test.txt',
+ create_directory=True,
+ callback=task_callback
+ )
+
+ # Create and run agents manager
+ agents = PraisonAIAgents(
+ agents=[researcher, writer],
+ tasks=[task1, task2, task3, task4],
+ verbose=True,
+ process="sequential",
+ manager_llm="gpt-4o"
+ )
+
+ agents.start()
+
+if __name__ == "__main__":
+ main()
diff --git a/cookbooks/general/example_custom_tools.py b/cookbooks/general/example_custom_tools.py
new file mode 100644
index 00000000..969faf19
--- /dev/null
+++ b/cookbooks/general/example_custom_tools.py
@@ -0,0 +1,152 @@
+from praisonaiagents import Agent, Task, PraisonAIAgents
+from typing import List, Dict, Union
+from duckduckgo_search import DDGS
+from langchain_community.tools import YouTubeSearchTool
+from langchain_community.utilities import WikipediaAPIWrapper
+
+# 1. Tool
+def internet_search_tool(query: str) -> List[Dict]:
+ """
+ Perform a search using DuckDuckGo.
+
+ Args:
+ query (str): The search query.
+
+ Returns:
+ list: A list of search result titles, URLs, and snippets.
+ """
+ try:
+ results = []
+ ddgs = DDGS()
+ for result in ddgs.text(keywords=query, max_results=10):
+ results.append({
+ "title": result.get("title", ""),
+ "url": result.get("href", ""),
+ "snippet": result.get("body", "")
+ })
+ return results
+
+ except Exception as e:
+ print(f"Error during DuckDuckGo search: {e}")
+ return []
+
+def youtube_search_tool(query: str, inspect: bool = False, max_results: int = 2):
+ """
+ Provide a custom wrapper around the YouTubeSearchTool.
+
+ Args:
+ query (str): The search query for YouTube.
+ inspect (bool): If True, returns tool inspection info instead of search results.
+ max_results (int): Maximum number of results to return (default: 2).
+ Returns:
+ Union[List[str], dict]: List of YouTube video URLs or tool inspection info.
+ """
+ yt = YouTubeSearchTool()
+
+ if inspect:
+ inspection_info = {
+ "type": type(yt),
+ "attributes": [attr for attr in dir(yt) if not attr.startswith('_')],
+ "methods": {
+ "run": getattr(yt, 'run', None),
+ "arun": getattr(yt, 'arun', None)
+ },
+ "properties": {
+ "name": getattr(yt, 'name', 'youtube_search'),
+ "description": getattr(yt, 'description', 'Search YouTube videos'),
+ "return_direct": getattr(yt, 'return_direct', False)
+ }
+ }
+ return inspection_info
+
+ # Format query with max_results
+ formatted_query = f"{query}, {max_results}"
+ return yt.run(formatted_query)
+
+def wikipedia_search_tool(query: str, inspect: bool = False, max_chars: int = 4000, top_k: int = 3):
+ """
+ Provide a custom wrapper around langchain_community's WikipediaAPIWrapper.
+
+ Args:
+ query (str): A search query for Wikipedia.
+ inspect (bool): If True, returns tool inspection info instead of search results.
+ max_chars (int): Maximum characters to return (default: 4000).
+ top_k (int): Number of top results to consider (default: 3).
+ Returns:
+ Union[str, dict]: Summary from Wikipedia or tool inspection info if inspect=True.
+ """
+ w = WikipediaAPIWrapper(
+ top_k_results=top_k,
+ doc_content_chars_max=max_chars,
+ lang='en'
+ )
+
+ if inspect:
+ inspection_info = {
+ "type": type(w),
+ "attributes": [attr for attr in dir(w) if not attr.startswith('_')],
+ "methods": {
+ "run": getattr(w, 'run', None),
+ "arun": getattr(w, 'arun', None)
+ },
+ "properties": {
+ "name": "wikipedia",
+ "description": "Search and get summaries from Wikipedia",
+ "top_k": w.top_k_results,
+ "lang": w.lang,
+ "max_chars": w.doc_content_chars_max
+ }
+ }
+ return inspection_info
+
+ try:
+ result = w.run(query)
+ return result
+ except Exception as e:
+ return f"Error searching Wikipedia: {str(e)}"
+
+# 2. Agent
+data_agent = Agent(
+ name="DataCollector",
+ role="Search Specialist",
+ goal="Perform internet searches to collect relevant information.",
+ backstory="Expert in finding and organizing internet data from multiple sources.",
+ tools=[internet_search_tool, youtube_search_tool, wikipedia_search_tool],
+ self_reflect=False
+)
+
+# 3. Tasks
+collect_task = Task(
+ description="Perform an internet search using the query: 'AI job trends in 2024'. Return results as a list of title, URL, and snippet.",
+ expected_output="List of search results with titles, URLs, and snippets.",
+ agent=data_agent,
+ name="collect_data",
+ is_start=True,
+ next_tasks=["validate_data"]
+)
+
+validate_task = Task(
+ description="""Validate the collected data. Check if:
+ 1. At least 5 results are returned.
+ 2. Each result contains a title and a URL.
+ Return validation_result as 'valid' or 'invalid' only no other text.""",
+ expected_output="Validation result indicating if data is valid or invalid.",
+ agent=data_agent,
+ name="validate_data",
+ task_type="decision",
+ condition={
+ "valid": [], # End the workflow on valid data
+ "invalid": ["collect_data"] # Retry data collection on invalid data
+ },
+)
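+# Note: invalid results re-run collect_data, so the workflow can loop multiple times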
+
+# 4. Workflow
+agents = PraisonAIAgents(
+ agents=[data_agent],
+ tasks=[collect_task, validate_task],
+ verbose=1,
+ process="workflow"
+)
+
+agents.start()
\ No newline at end of file
diff --git a/cookbooks/general/example_sequential.py b/cookbooks/general/example_sequential.py
new file mode 100644
index 00000000..139bb8ee
--- /dev/null
+++ b/cookbooks/general/example_sequential.py
@@ -0,0 +1,141 @@
+from praisonaiagents import Agent, Task, PraisonAIAgents, error_logs
+from duckduckgo_search import DDGS
+
+def my_callback(output):
+ print(f"Callback Task output: {output}")
+
+def internet_search_tool(query) -> list:
+ """
+ Perform a search using DuckDuckGo.
+
+ Args:
+ query (str): The search query.
+
+ Returns:
+ list: A list of search result titles and URLs.
+ """
+ try:
+ results = []
+ ddgs = DDGS()
+ for result in ddgs.text(keywords=query, max_results=10):
+ results.append({
+ "title": result.get("title", ""),
+ "url": result.get("href", ""),
+ "snippet": result.get("body", "")
+ })
+ return results
+
+ except Exception as e:
+ print(f"Error during DuckDuckGo search: {e}")
+ return []
+
+# Create agents
+researcher = Agent(
+ name="Researcher",
+ role="Senior Research Analyst",
+ goal="Uncover cutting-edge developments in AI and data science",
+ backstory="""You are an expert at a technology research group,
+ skilled in identifying trends and analyzing complex data.""",
+ verbose=True,
+ allow_delegation=False,
+ tools=[internet_search_tool],
+ llm="gpt-4o",
+ markdown=True,
+ reflect_llm="gpt-4o",
+ min_reflect=2,
+ max_reflect=4
+)
+writer = Agent(
+ name="Writer",
+ role="Tech Content Strategist",
+ goal="Craft compelling content on tech advancements",
+ backstory="""You are a content strategist known for
+ making complex tech topics interesting and easy to understand.""",
+ verbose=True,
+ allow_delegation=True,
+ llm="gpt-4o",
+ tools=[],
+ markdown=True
+)
+
+# Create tasks
+task1 = Task(
+ name="research_task",
+ description="""Analyze 2024's AI advancements.
+ Find major trends, new technologies, and their effects.""",
+ expected_output="""A detailed report on 2024 AI advancements""",
+ agent=researcher,
+ tools=[internet_search_tool]
+)
+
+task2 = Task(
+ name="writing_task",
+ description="""Create a blog post about major AI advancements using the insights you have.
+ Make it interesting, clear, and suited for tech enthusiasts.
+ It should be at least 4 paragraphs long.""",
+ expected_output="A blog post of at least 4 paragraphs",
+ agent=writer,
+ context=[task1],
+ callback=my_callback,
+ tools=[]
+)
+
+task3 = Task(
+ name="json_task",
+ description="""Create a json object with a title of "My Task" and content of "My content".""",
+ expected_output="""JSON output with title and content""",
+ agent=researcher,
+)
+
+task4 = Task(
+ name="save_output_task",
+ description="""Save the AI blog post to a file""",
+ expected_output="""File saved successfully""",
+ agent=writer,
+ context=[task2],
+ output_file='outputs/ai_blog_post.txt',
+ create_directory=True
+)
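+# task4 writes task2's blog post to outputs/ai_blog_post.txt; create_directory creates the folder if missing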
+
+# Create and run agents manager
+agents = PraisonAIAgents(
+ agents=[researcher, writer],
+ tasks=[task1, task2, task3, task4],
+ verbose=False,
+ process="sequential", # "sequential" or "hierarchical"
+ manager_llm="gpt-4o"
+)
+
+result = agents.start()
+
+# Print results and error summary
+print("\n=== Task Results ===")
+for task_id, task_status in result['task_status'].items():
+ print(f"Task {task_id}: {task_status}")
+ if task_result := result['task_results'].get(task_id):
+ print(f"Output: {task_result.raw[:200]}...") # Show first 200 chars
+
+# Print task details
+print("\n=== Task Details ===")
+for i in range(4):
+ print(agents.get_task_details(i))
+
+# Print agent details
+print("\n=== Agent Details ===")
+print(agents.get_agent_details('Researcher'))
+print(agents.get_agent_details('Writer'))
+
+# Print any errors
+if error_logs:
+ print("\n=== Error Summary ===")
+ for err in error_logs:
+ print(f"- {err}")
+ if "parsing self-reflection json" in err:
+ print(" Reason: The self-reflection JSON response was not valid JSON.")
+ elif "Error: Task with ID" in err:
+ print(" Reason: Task ID referenced does not exist.")
+ elif "saving task output to file" in err:
+ print(" Reason: Possible file permissions or invalid path.")
+ else:
+ print(" Reason not identified")
\ No newline at end of file
diff --git a/cookbooks/general/langchain_example.py b/cookbooks/general/langchain_example.py
new file mode 100644
index 00000000..aeddca42
--- /dev/null
+++ b/cookbooks/general/langchain_example.py
@@ -0,0 +1,31 @@
+from praisonaiagents import Agent, Task, PraisonAIAgents
+from langchain_community.tools import YouTubeSearchTool
+from langchain_community.utilities import WikipediaAPIWrapper
+
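+# LangChain tool classes are passed in directly; praisonaiagents is expected to wrap them via its LangChain integration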
+# Create an agent with both tools
+agent = Agent(
+ name="SearchAgent",
+ role="Research Assistant",
+ goal="Search for information from multiple sources",
+ backstory="I am an AI assistant that can search YouTube and Wikipedia.",
+ tools=[YouTubeSearchTool, WikipediaAPIWrapper],
+ self_reflect=False
+)
+
+# Create tasks to demonstrate both tools
+task = Task(
+ name="search_task",
+ description="Search for information about 'AI advancements' on both YouTube and Wikipedia",
+ expected_output="Combined information from YouTube videos and Wikipedia articles",
+ agent=agent
+)
+
+# Create and start the workflow
+agents = PraisonAIAgents(
+ agents=[agent],
+ tasks=[task],
+ verbose=True
+)
+
+agents.start()
\ No newline at end of file
diff --git a/cookbooks/general/memory_example.py b/cookbooks/general/memory_example.py
new file mode 100644
index 00000000..c4e17c1b
--- /dev/null
+++ b/cookbooks/general/memory_example.py
@@ -0,0 +1,184 @@
+from praisonaiagents import Agent, Task, PraisonAIAgents
+import logging
+import os
+
+def main():
+ # Example memory config (for reference only; it is not passed to the agents below)
+ memory_config = {
+ "provider": "rag",
+ "use_embedding": True,
+ "storage": {
+ "type": "sqlite",
+ "path": "./.praison/memory.db"
+ },
+ "rag_db_path": "./.praison/chroma_db"
+ }
+
+ # Test facts
+ fact1 = "The capital city of Jujuha is Hahanu and its population is 102300"
+ fact2 = "Three main ingredients in a classic proloder are eggs, sugar, and flour"
+ fact3 = "The year the first Josinga was released is 2007"
+
+ # # Check if database exists
+ # if os.path.exists("./memory.db"):
+ # logger.info("Found existing memory database")
+ # else:
+ # logger.info("Creating new memory database")
+
+ # Create task config (without memory config since it's moved to PraisonAIAgents)
+ task_config = {}
+
+ # Create agents with different roles
+ researcher = Agent(
+ role="Research Analyst",
+ goal="Research and document key information about topics",
+ backstory="Expert at analyzing and storing information in memory",
+ llm="gpt-4o-mini"
+ )
+
+ retriever = Agent(
+ role="Information Retriever",
+ goal="Retrieve and verify stored information from memory",
+ backstory="Specialist in searching and validating information from memory",
+ llm="gpt-4o-mini"
+ )
+
+ # Task 1: Process the facts
+ store_task = Task(
+ description=f"""
+ Process and analyze this information:
+ 1. {fact1}
+ 2. {fact2}
+ 3. {fact3}
+
+ Provide a clear summary of each fact.
+ """,
+ expected_output="""
+ Clear statements summarizing each fact.
+ Example format:
+ 1. [Summary of fact 1]
+ 2. [Summary of fact 2]
+ 3. [Summary of fact 3]
+ """,
+ agent=researcher
+ )
+
+ # Task 2: Write a few points about AI (unrelated to the stored facts)
+ verify_task = Task(
+ description="""
+ Write a few points about AI
+ """,
+ expected_output="Points about AI",
+ agent=retriever
+ )
+
+ # Task 3: Query memory
+ query_task = Task(
+ description="""
+ Using ONLY information found in memory:
+ 1. What is stored in memory about Hahanu?
+ 2. What ingredients for proloder are recorded in memory?
+ 3. What year is stored in memory for the Josinga release?
+
+ For each answer, cite the memory record you found.
+ """,
+ expected_output="Answers based solely on memory records with citations",
+ agent=retriever
+ )
+
+ # Task 4: Query both short-term and long-term memory
+ query_both_task = Task(
+ description="""
+ Using ONLY information found in memory:
+ 1. What is stored in both short-term and long-term memory about Jujuha?
+ 2. What ingredients for proloder are recorded in both short-term and long-term memory?
+ 3. What year is stored in both short-term and long-term memory for the Josinga release?
+
+ For each answer, cite the memory record you found.
+ """,
+ expected_output="Answers based solely on memory records with citations",
+ agent=retriever
+ )
+
+ # Initialize PraisonAIAgents with memory configuration
+ agents = PraisonAIAgents(
+ agents=[researcher, retriever],
+ tasks=[store_task, verify_task, query_task, query_both_task],
+ verbose=True, # Use same verbose level as memory
+ memory=True,
+ embedder={
+ "provider": "openai",
+ "config": {
+ "model": "text-embedding-3-small"
+ }
+ }
+ )
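+ # The embedder setting selects the embedding model used by the memory store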
+
+ # agents = PraisonAIAgents(
+ # agents=[researcher, retriever],
+ # tasks=[store_task, verify_task, query_task, query_both_task],
+ # verbose=True, # Use same verbose level as memory
+ # memory=True
+ # )
+
+ # Execute tasks
+ print("\nExecuting Memory Test Tasks...")
+ print("-" * 50)
+ agents.start()
+
+ # Use shared memory for final checks
+ memory = agents.shared_memory
+
+ # Test memory retrieval with different quality thresholds
+ if memory:
+ print("\nFinal Memory Check:")
+ print("-" * 50)
+
+ queries = ["Jujuha", "proloder", "Josinga"]
+ for query in queries:
+ print(f"\nSearching memory for: {query}")
+
+ # Search in both short-term and long-term memory
+ print("\nShort-term memory results:")
+ stm_results = memory.search_short_term(query)
+ if stm_results:
+ for item in stm_results:
+ print(f"Content: {item.get('content', '')[:200]}")
+ if 'meta' in item:
+ print(f"Metadata: {item['meta']}")
+ print("-" * 20)
+ else:
+ print("No results found in short-term memory")
+
+ print("\nLong-term memory results:")
+ ltm_results = memory.search_long_term(query)
+ if ltm_results:
+ for item in ltm_results:
+ print(f"Content: {item.get('text', '')[:200]}")
+ if 'metadata' in item:
+ print(f"Metadata: {item['metadata']}")
+ print("-" * 20)
+ else:
+ print("No results found in long-term memory")
+
+ # Also check ChromaDB if using RAG
+ if memory.use_rag and hasattr(memory, "chroma_col"):
+ print("\nChromaDB results:")
+ try:
+ all_items = memory.chroma_col.get()
+ print(f"Found {len(all_items['ids'])} items in ChromaDB")
+ for i in range(len(all_items['ids'])):
+ print(f"ID: {all_items['ids'][i]}")
+ print(f"Content: {all_items['documents'][i][:200]}")
+ print(f"Metadata: {all_items['metadatas'][i]}")
+ print("-" * 20)
+ except Exception as e:
+ print(f"Error querying ChromaDB: {e}")
+
+ print("-" * 30)
+ else:
+ print("\nNo memory available for final checks")
+
+if __name__ == "__main__":
+ main()
diff --git a/cookbooks/general/memory_simple.py b/cookbooks/general/memory_simple.py
new file mode 100644
index 00000000..52c0bb70
--- /dev/null
+++ b/cookbooks/general/memory_simple.py
@@ -0,0 +1,63 @@
+from praisonaiagents import Agent, Task, PraisonAIAgents
+
+# Test facts
+fact1 = "The capital city of Jujuha is Hahanu and its population is 102300"
+fact2 = "Three main ingredients in a classic proloder are eggs, sugar, and flour"
+fact3 = "The year the first Josinga was released is 2007"
+
+fact_agent = Agent(
+ name="Fact Agent",
+ instructions="You are a fact agent, you store and retrieve facts in memory",
+ llm="gpt-4o-mini"
+)
+
+research_agent = Agent(
+ name="Research Agent",
+ instructions="You are a research analyst, you research and document key points about topics",
+ llm="gpt-4o-mini"
+)
+
+blog_agent = Agent(
+ name="Blog Agent",
+ instructions="You are a blog writer, you write a blog post about the research",
+ llm="gpt-4o-mini"
+)
+
+fact_task = Task(
+ description="Store the following facts in memory: " + fact1 + ", " + fact2 + ", " + fact3,
+ agent=fact_agent
+)
+
+research_task = Task(
+ description="Research and document 2 key points about AI",
+ agent=research_agent
+)
+
+research_task2 = Task(
+ description="Research and document 2 key points about AI in healthcare",
+ agent=research_agent
+)
+
+research_task3 = Task(
+ description="Research and document 2 key points about AI in education",
+ agent=research_agent
+)
+
+research_task4 = Task(
+ description="Research and document 2 key points about AI in finance",
+ agent=research_agent
+)
+
+blog_task = Task(
+ description="Write a blog post about Jujuha",
+ agent=blog_agent
+)
+
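+# memory=True lets later tasks (e.g. the Jujuha blog post) recall facts stored by earlier ones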
+agents = PraisonAIAgents(
+ agents=[fact_agent, research_agent, blog_agent],
+ tasks=[fact_task, research_task, research_task2, research_task3, research_task4, blog_task],
+ memory=True
+)
+
+agents.start()
\ No newline at end of file
diff --git a/cookbooks/general/mini_agents_example.py b/cookbooks/general/mini_agents_example.py
new file mode 100644
index 00000000..08ad455f
--- /dev/null
+++ b/cookbooks/general/mini_agents_example.py
@@ -0,0 +1,6 @@
+from praisonaiagents import Agent, Agents, Tools
+
+research_agent = Agent(instructions="You are a research agent to search internet about AI 2024", tools=[Tools.internet_search])
+summarise_agent = Agent(instructions="You are a summarize agent to summarise in points")
+agents = Agents(agents=[research_agent, summarise_agent])
+agents.start()
\ No newline at end of file
diff --git a/cookbooks/general/multimodal.py b/cookbooks/general/multimodal.py
new file mode 100644
index 00000000..68dde978
--- /dev/null
+++ b/cookbooks/general/multimodal.py
@@ -0,0 +1,60 @@
+from praisonaiagents import Agent, Task, PraisonAIAgents
+
+# Create Vision Analysis Agent
+vision_agent = Agent(
+ name="VisionAnalyst",
+ role="Computer Vision Specialist",
+ goal="Analyze images and videos to extract meaningful information",
+ backstory="""You are an expert in computer vision and image analysis.
+ You excel at describing images, detecting objects, and understanding visual content.""",
+ llm="gpt-4o-mini",
+ self_reflect=False
+)
+
+# 1. Task with Image URL
+task1 = Task(
+ name="analyze_landmark",
+ description="Describe this famous landmark and its architectural features.",
+ expected_output="Detailed description of the landmark's architecture and significance",
+ agent=vision_agent,
+ images=["https://upload.wikimedia.org/wikipedia/commons/b/bf/Krakow_-_Kosciol_Mariacki.jpg"]
+)
+
+# 2. Task with Local Image File
+task2 = Task(
+ name="analyze_local_image",
+ description="What objects can you see in this image? Describe their arrangement.",
+ expected_output="Detailed description of objects and their spatial relationships",
+ agent=vision_agent,
+ images=["image.jpg"]
+)
+
+# 3. Task with Video File
+task3 = Task(
+ name="analyze_video",
+ description="""Watch this video and provide:
+ 1. A summary of the main events
+ 2. Key objects and people visible
+ 3. Any text or important information shown
+ 4. The overall context and setting""",
+ expected_output="Comprehensive analysis of the video content",
+ agent=vision_agent,
+ images=["video.mp4"]
+)
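+# Note: video files are supplied through the same images parameter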
+
+# Create PraisonAIAgents instance
+agents = PraisonAIAgents(
+ agents=[vision_agent],
+ tasks=[task1, task2, task3],
+ process="sequential",
+ verbose=1
+)
+
+# Run all tasks
+result = agents.start()
+
+# Print results
+for task_id, task_result in result["task_results"].items():
+ print(f"\nTask {task_id} Result:")
+ print(task_result.raw)
\ No newline at end of file
diff --git a/cookbooks/general/structured_agents_example.py b/cookbooks/general/structured_agents_example.py
new file mode 100644
index 00000000..9aa90d36
--- /dev/null
+++ b/cookbooks/general/structured_agents_example.py
@@ -0,0 +1,56 @@
+from praisonaiagents import Agent, Task, PraisonAIAgents, Tools
+from pydantic import BaseModel
+
+class AnalysisReport(BaseModel):
+ title: str
+ findings: str
+ summary: str
+
+# Create a researcher agent
+researcher = Agent(
+ name="AIResearcher",
+ role="Technology Research Analyst",
+ goal="Analyze and structure information about AI developments",
+ backstory="Expert analyst specializing in AI technology trends",
+ verbose=True,
+ llm="gpt-4o-mini",
+ tools=[Tools.internet_search],
+ self_reflect=False
+)
+
+# Create an analyst agent
+analyst = Agent(
+ name="DataAnalyst",
+ role="Data Insights Specialist",
+ goal="Structure and analyze research findings",
+ backstory="Senior data analyst with expertise in pattern recognition",
+ verbose=True,
+ llm="gpt-4o-mini",
+ self_reflect=False
+)
+
+# Define structured tasks
+research_task = Task(
+ name="gather_research",
+ description="Research recent AI developments in 2024",
+ agent=researcher,
+ expected_output="Research findings"
+)
+
+analysis_task = Task(
+ name="analyze_findings",
+ description="Analyze research findings and create structured report. No additional text or explanation.",
+ agent=analyst,
+ output_json=AnalysisReport,
+ expected_output="JSON object"
+)
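+# output_json=AnalysisReport asks the framework to return the analyst's reply
+# as JSON matching the Pydantic schema above (hence the "no additional text"
+# instruction in the task description).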
+
+# Initialize and run agents
+agents = PraisonAIAgents(
+ agents=[researcher, analyst],
+ tasks=[research_task, analysis_task],
+ process="sequential",
+ verbose=True
+)
+result = agents.start()
+print(result)
diff --git a/cookbooks/general/structured_response_example.py b/cookbooks/general/structured_response_example.py
new file mode 100644
index 00000000..115bc4d0
--- /dev/null
+++ b/cookbooks/general/structured_response_example.py
@@ -0,0 +1,95 @@
+from praisonaiagents import Agent
+from pydantic import BaseModel
+from typing import List, Optional
+
+# Define structured output models
+class SearchResult(BaseModel):
+ title: str
+ url: Optional[str]
+ snippet: str
+
+class AnalysisReport(BaseModel):
+ topic: str
+ key_findings: List[str]
+ search_results: List[SearchResult]
+ summary: str
+ confidence_score: float
+
+def get_structured_analysis(query: str, verbose: bool = True) -> AnalysisReport:
+ """
+ Performs a search and returns a structured analysis using an AI agent.
+
+ Args:
+ query (str): The search query or topic to analyze
+ verbose (bool): Whether to show detailed output
+
+ Returns:
+ AnalysisReport: A structured report containing the analysis
+ """
+ # Create an agent with search capabilities
+ agent = Agent(
+ name="StructuredAnalyst",
+ role="Research Analyst",
+ goal="Analyze topics and provide structured reports",
+ backstory="Expert at gathering information and providing structured analysis",
+ verbose=verbose,
+ self_reflect=True, # Enable self-reflection for better quality
+ markdown=True,
+ llm="gpt-4o-mini"
+ )
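+ # self_reflect=True makes the agent critique and revise its own draft
+ # before answering, which tends to improve structured-output quality.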
+
+ # Create the analysis prompt
+ prompt = f"""
+Analyze the following topic: "{query}"
+
+Provide a structured analysis in JSON format that matches this schema:
+{{
+ "topic": "string",
+ "key_findings": ["string"],
+ "search_results": [
+ {{
+ "title": "string",
+ "url": "string",
+ "snippet": "string"
+ }}
+ ],
+ "summary": "string",
+ "confidence_score": float (0-1)
+}}
+
+Requirements:
+1. Include at least 3 key findings
+2. Include at least 2 relevant search results
+3. Provide a comprehensive summary
+4. Set confidence score based on quality of sources (0-1)
+
+Return ONLY the JSON object, no other text.
+"""
+
+ # Get structured response from agent
+ response = agent.chat(
+ prompt=prompt,
+ output_json=AnalysisReport # This ensures response matches our model
+ )
+
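+ # Parse the agent's reply into the Pydantic model. This assumes chat()
+ # returns the response as a JSON string when output_json is set; if it
+ # already returns a model instance, this validation step is unnecessary.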
+ return AnalysisReport.model_validate_json(response)
+
+if __name__ == "__main__":
+ # Example usage
+ analysis = get_structured_analysis("Latest developments in AI agents and autonomous systems")
+ # Print the structured results
+ print("\n=== Structured Analysis Report ===")
+ print(f"Topic: {analysis.topic}")
+ print("\nKey Findings:")
+ for i, finding in enumerate(analysis.key_findings, 1):
+ print(f"{i}. {finding}")
+
+ print("\nSearch Results:")
+ for result in analysis.search_results:
+ print(f"\nTitle: {result.title}")
+ print(f"URL: {result.url}")
+ print(f"Snippet: {result.snippet}")
+
+ print(f"\nSummary: {analysis.summary}")
+ print(f"Confidence Score: {analysis.confidence_score:.2f}")
\ No newline at end of file
diff --git a/cookbooks/general/tools_example.py b/cookbooks/general/tools_example.py
new file mode 100644
index 00000000..891ea56b
--- /dev/null
+++ b/cookbooks/general/tools_example.py
@@ -0,0 +1,30 @@
+from praisonaiagents import Agent, Task, PraisonAIAgents
+from praisonaiagents.tools import get_article, get_news_sources, get_articles_from_source, get_trending_topics
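+# These helpers are praisonaiagents' built-in news tools for fetching
+# individual articles, listing sources, and pulling trending topics.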
+
+# Create News agent
+news_agent = Agent(
+ name="NewsAgent",
+ role="News Analyst",
+ goal="Collect and analyze news articles from various sources.",
+ backstory="Expert in news gathering and content analysis.",
+ tools=[get_article, get_news_sources, get_articles_from_source, get_trending_topics],
+ self_reflect=False
+)
+
+
+# Define research task
+news_task = Task(
+ description="Analyze news articles about 'AI developments' from major tech news sources.",
+ expected_output="Summary of key AI developments with source articles.",
+ agent=news_agent,
+ name="ai_news"
+)
+
+
+# Run agent
+agents = PraisonAIAgents(
+ agents=[news_agent],
+ tasks=[news_task],
+ process="sequential"
+)
+agents.start()
diff --git a/cookbooks/general/workflow_example_basic.py b/cookbooks/general/workflow_example_basic.py
new file mode 100644
index 00000000..d76b84c6
--- /dev/null
+++ b/cookbooks/general/workflow_example_basic.py
@@ -0,0 +1,74 @@
+from praisonaiagents import Agent, Task, PraisonAIAgents
+from typing import List, Dict
+from duckduckgo_search import DDGS
+
+# 1. Tool
+def internet_search_tool(query: str) -> List[Dict]:
+ """
+ Perform a search using DuckDuckGo.
+
+ Args:
+ query (str): The search query.
+
+ Returns:
+ list: A list of search result titles, URLs, and snippets.
+ """
+ try:
+ results = []
+ ddgs = DDGS()
+ for result in ddgs.text(keywords=query, max_results=10):
+ results.append({
+ "title": result.get("title", ""),
+ "url": result.get("href", ""),
+ "snippet": result.get("body", "")
+ })
+ return results
+
+ except Exception as e:
+ print(f"Error during DuckDuckGo search: {e}")
+ return []
+
+# 2. Agent
+data_agent = Agent(
+ name="DataCollector",
+ role="Search Specialist",
+ goal="Perform internet searches to collect relevant information.",
+ backstory="Expert in finding and organising internet data.",
+ tools=[internet_search_tool],
+ self_reflect=False
+)
+
+# 3. Tasks
+collect_task = Task(
+ description="Perform an internet search using the query: 'AI job trends in 2024'. Return results as a list of title, URL, and snippet.",
+ expected_output="List of search results with titles, URLs, and snippets.",
+ agent=data_agent,
+ name="collect_data",
+ is_start=True,
+ next_tasks=["validate_data"]
+)
+
+validate_task = Task(
+ description="""Validate the collected data. Check if:
+ 1. At least 5 results are returned.
+ 2. Each result contains a title and a URL.
+ Return validation_result as 'valid' or 'invalid' only, with no other text.""",
+ expected_output="Validation result indicating if data is valid or invalid.",
+ agent=data_agent,
+ name="validate_data",
+ task_type="decision",
+ condition={
+ "valid": [], # End the workflow on valid data
+ "invalid": ["collect_data"] # Retry data collection on invalid data
+ },
+)
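+# The decision task's output is matched against the condition keys: "valid"
+# maps to an empty list, which ends the workflow, while "invalid" loops back
+# to collect_data for another attempt.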
+
+# 4. AI Agents Workflow
+agents = PraisonAIAgents(
+ agents=[data_agent],
+ tasks=[collect_task, validate_task],
+ verbose=1,
+ process="workflow"
+)
+
+agents.start()
\ No newline at end of file
diff --git a/cookbooks/general/workflow_example_detailed.py b/cookbooks/general/workflow_example_detailed.py
new file mode 100644
index 00000000..6dbab2a3
--- /dev/null
+++ b/cookbooks/general/workflow_example_detailed.py
@@ -0,0 +1,253 @@
+from praisonaiagents import Agent, Task, PraisonAIAgents
+import random
+from typing import List, Dict, Union
+import json
+from pydantic import BaseModel
+
+# Add Pydantic models for data validation
+class Person(BaseModel):
+ name: str
+ age: int
+ job: str
+ city: str
+ salary: int
+
+class ProcessedPerson(Person):
+ salary_category: str
+ age_group: str
+ processing_status: str
+
+class DataList(BaseModel):
+ items: List[Dict]
+
+class ValidationResult(BaseModel):
+ validation_result: str
+ details: str = ""
+
+def random_data_of_individuals() -> List[Dict]:
+ """Generate random individual data"""
+ names = ["John", "Jane", "Mike", "Sarah", "David", "Emma"]
+ jobs = ["Engineer", "Doctor", "Teacher", "Artist", "Developer"]
+ cities = ["New York", "London", "Tokyo", "Paris", "Berlin"]
+
+ data = []
+ for _ in range(random.randint(3, 7)):
+ person = {
+ "name": random.choice(names),
+ "age": random.randint(25, 65),
+ "job": random.choice(jobs),
+ "city": random.choice(cities),
+ "salary": random.randint(30000, 120000)
+ }
+ data.append(person)
+ return data
+
+def process_data_of_individuals(data: Union[List[Dict], Dict, str]) -> Dict:
+ """Process individual data by adding categories and analysis"""
+ try:
+ print("\n[DEBUG] process_data_of_individuals input:", data)
+ print("[DEBUG] Current workflow state:")
+ print("- total_records:", workflow.get_state("total_records"))
+ print("- current_index:", workflow.get_state("current_index"))
+ print("- remaining:", workflow.get_state("remaining"))
+
+ # Get the items list from the collect_task result
+ collect_result = None
+ for task in workflow.tasks.values():
+ if task.name == "collect_data" and task.result:
+ try:
+ collect_data = json.loads(task.result.raw)
+ collect_result = collect_data.get("items", [])
+ print("[DEBUG] Found collect_data items:", len(collect_result))
+ except (json.JSONDecodeError, TypeError):
+ print("[DEBUG] Failed to parse collect_data result")
+
+ # Handle string input by trying to parse it as JSON
+ if isinstance(data, str):
+ if ":" in data and not data.strip().startswith("{"):
+ # Convert string format to dictionary
+ pairs = [pair.strip() for pair in data.split(",")]
+ data_dict = {}
+ for pair in pairs:
+ key, value = pair.split(":")
+ key = key.strip().lower()
+ value = value.strip()
+ if key == "age" or key == "salary":
+ value = int(value)
+ data_dict[key] = value
+ data = data_dict
+ else:
+ data = json.loads(data)
+ print("[DEBUG] Parsed data:", data)
+
+ # Handle single record
+ if isinstance(data, dict):
+ person = data
+ # Initialize total records if not set
+ total_records = workflow.get_state("total_records")
+ if total_records is None and collect_result:
+ total_records = len(collect_result)
+ workflow.set_state("total_records", total_records)
+ print(f"[DEBUG] Initialized total_records to {total_records}")
+
+ current_index = workflow.get_state("current_index", 0)
+ total_records = total_records or 1
+ remaining = total_records - (current_index + 1)
+ workflow.set_state("remaining", remaining)
+ print(f"[DEBUG] Processing record {current_index + 1}/{total_records}")
+
+ elif isinstance(data, list):
+ if len(data) == 0:
+ raise ValueError("Empty data list")
+ person = data[0]
+ workflow.set_state("total_records", len(data))
+ workflow.set_state("current_index", 0)
+ workflow.set_state("remaining", len(data) - 1)
+ print(f"[DEBUG] First record from list of {len(data)} items")
+ else:
+ raise ValueError("Input must be a dictionary or list with at least one record")
+
+ processed_person = person.copy()
+
+ # Add salary category
+ salary = person.get("salary", 0)
+ if salary < 50000:
+ processed_person["salary_category"] = "entry"
+ elif salary < 90000:
+ processed_person["salary_category"] = "mid"
+ else:
+ processed_person["salary_category"] = "senior"
+
+ # Add age group
+ age = person.get("age", 0)
+ if age < 35:
+ processed_person["age_group"] = "young"
+ elif age < 50:
+ processed_person["age_group"] = "mid"
+ else:
+ processed_person["age_group"] = "senior"
+
+ # Add processing status using workflow state
+ remaining = workflow.get_state("remaining", 0)
+ current_index = workflow.get_state("current_index", 0)
+ total_records = workflow.get_state("total_records", 1)
+
+ # Update current index for next iteration
+ workflow.set_state("current_index", current_index + 1)
+
+ print(f"[DEBUG] Status check - remaining: {remaining}, current_index: {current_index}, total_records: {total_records}")
+
+ if remaining <= 0 and current_index >= total_records - 1:
+ print("[DEBUG] Setting status to 'all records processed'")
+ processed_person["processing_status"] = "all records processed"
+ else:
+ print(f"[DEBUG] More records to process. Remaining: {remaining}")
+ processed_person["processing_status"] = f"more records to process ({remaining} remaining)"
+
+ print("[DEBUG] Final processed person:", processed_person)
+ return processed_person
+
+ except Exception as e:
+ print(f"[DEBUG] Error processing data: {str(e)}")
+ return {"error": str(e), "processing_status": "error occurred"}
+
+# Create agents
+data_agent = Agent(
+ name="DataCollector",
+ role="Data collection specialist",
+ goal="Collect and validate data about individuals",
+ backstory="Expert in gathering and validating demographic data",
+ tools=[random_data_of_individuals],
+ self_reflect=False
+)
+
+process_agent = Agent(
+ name="DataProcessor",
+ role="Data processor",
+ goal="Process and categorize individual data",
+ backstory="Expert in data analysis and categorization",
+ tools=[process_data_of_individuals],
+ self_reflect=False
+)
+
+# Modify tasks to use Pydantic models
+collect_task = Task(
+ description="Collect random individual data using the random_data_of_individuals tool. Return as a JSON object with 'items' array.",
+ expected_output="List of individual records with basic information",
+ agent=data_agent,
+ name="collect_data",
+ tools=[random_data_of_individuals],
+ is_start=True,
+ next_tasks=["validate_data"],
+ output_json=DataList
+)
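+# output_json=DataList coerces the collector's reply into the {"items": [...]}
+# shape that process_data_of_individuals later reads back out of the task result.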
+
+validate_task = Task(
+ description="""Validate the collected data. Check if:
+ 1. All required fields are present (name, age, job, city, salary)
+ 2. Age is between 25 and 65
+ 3. Salary is between 30000 and 120000
+ Return validation_result as 'valid' or 'invalid' with optional details.""",
+ expected_output="Validation result indicating if data is valid or invalid",
+ agent=data_agent,
+ name="validate_data",
+ task_type="decision",
+ condition={
+ "valid": ["process_data"],
+ "invalid": ["collect_data"]
+ },
+ output_json=ValidationResult
+)
+
+process_task = Task(
+ description="""Process one record at a time from the input data.
+ Current progress will be shown in Loop Status.
+
+ For the current record:
+ 1. Use process_data_of_individuals tool to add categories
+ 2. Return the processed record with remaining count
+
+ Current remaining: {remaining}
+ Current item: {current_item}
+
+ Process this record and indicate if more records remain.""",
+ expected_output="Processed record with categories and status",
+ agent=process_agent,
+ name="process_data",
+ tools=[process_data_of_individuals],
+ task_type="loop",
+ condition={
+ "more records to process": ["process_data"],
+ "all records processed": []
+ },
+ context=[collect_task],
+ output_json=ProcessedPerson
+)
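+# task_type="loop" re-runs this task while the output's processing_status
+# matches "more records to process"; once it reports "all records processed",
+# the condition maps to an empty list and the workflow ends.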
+
+# Create PraisonAIAgents instance with workflow process
+workflow = PraisonAIAgents(
+ agents=[data_agent, process_agent],
+ tasks=[collect_task, validate_task, process_task],
+ verbose=1,
+ process="workflow"
+)
+
+# Run the workflow
+result = workflow.start()
+
+# Print results
+print("\nWorkflow Results:")
+print("----------------")
+for task_id, task in workflow.tasks.items():
+ print(f"\nTask: {task.name}")
+ print(f"Status: {task.status}")
+ if task.result:
+ print("Output:")
+ try:
+ # Try to format as pretty JSON (json is already imported at the top)
+ output = json.loads(task.result.raw)
+ print(json.dumps(output, indent=2))
+ except (json.JSONDecodeError, TypeError):
+ # If not JSON, print the raw output (truncated to 500 characters)
+ print(task.result.raw[:500] + "..." if len(task.result.raw) > 500 else task.result.raw)
\ No newline at end of file
diff --git a/docs/features/agenticrouting.mdx b/docs/features/agenticrouting.mdx
new file mode 100644
index 00000000..ad46042e
--- /dev/null
+++ b/docs/features/agenticrouting.mdx
@@ -0,0 +1,224 @@
+---
+title: "Agentic Routing"
+description: "Learn how to create AI agents with dynamic routing capabilities for workflow management."
+icon: "route"
+---
+
+## Quick Start
+
+