Single-prompt agents work for demos but fail on complex tasks. Real-world problems require planning, error recovery, and coordination.
Here are the patterns that address those challenges, from structured planning through multi-agent coordination to memory management and error recovery.
Agent Architecture Evolution
From Simple to Complex
agent_evolution:
  level_1_react:
    pattern: "Reason → Act → Observe → Repeat"
    limitations:
      - Gets stuck in loops
      - No long-term planning
      - Forgets context
  level_2_plan_execute:
    pattern: "Plan → Execute steps → Replan if needed"
    improvements:
      - Explicit planning phase
      - Better task decomposition
      - Can recover from failures
  level_3_hierarchical:
    pattern: "Orchestrator → Specialized sub-agents"
    improvements:
      - Division of labor
      - Parallel execution
      - Expertise per subtask
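To make level 1 concrete, here is a minimal ReAct loop. `llm` and `tools` are hypothetical stand-ins for a model client and a tool registry, and the step cap is the only guard against the stuck-in-a-loop failure listed above.

# Minimal level-1 ReAct loop (sketch). `llm` and `tools` are
# hypothetical stand-ins for a model client and a tool registry.
async def react_agent(task: str, llm, tools: dict, max_steps: int = 10) -> str:
    transcript = f"Task: {task}\n"
    for _ in range(max_steps):  # hard cap: the only guard against loops
        response = await llm.generate(
            prompt=transcript
            + 'Reply with "Action: <tool>: <input>" or "Final Answer: <answer>".'
        )
        transcript += response + "\n"
        if "Final Answer:" in response:
            return response.split("Final Answer:", 1)[1].strip()
        tool_name, tool_input = parse_action(response)     # act
        observation = await tools[tool_name](tool_input)
        transcript += f"Observation: {observation}\n"      # observe
    return "Stopped: exceeded max_steps"

def parse_action(response: str) -> tuple[str, str]:
    # Naive parsing; production agents should use structured output
    line = next(l for l in response.splitlines() if l.startswith("Action:"))
    _, tool_name, tool_input = line.split(":", 2)
    return tool_name.strip(), tool_input.strip()

Everything lives in one growing transcript, which is exactly why level-1 agents forget context on long tasks: the transcript is the only memory.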
Planning Patterns
Plan-and-Execute
class PlanExecuteAgent:
    """Agent that plans before acting."""

    async def run(self, task: str) -> AgentResult:
        # Phase 1: create a plan
        plan = await self._create_plan(task)

        # Phase 2: execute steps from a pending queue, so replanning
        # can actually replace the remaining work mid-run (rebinding
        # `plan` inside a `for step in plan.steps` loop would not)
        results = []
        pending = list(plan.steps)
        while pending:
            step = pending.pop(0)
            result = await self._execute_step(step, results)
            results.append(result)

            # Replan if needed: swap out the remaining steps
            if result.requires_replan:
                pending = await self._replan(task, plan, results)

        return self._compile_results(results)

    async def _create_plan(self, task: str) -> Plan:
        response = await self.llm.generate(
            prompt=f"""Create a step-by-step plan to complete this task.

Task: {task}

For each step provide:
- Step number
- Action to take
- Expected outcome
- Dependencies (which previous steps must complete)

Plan:"""
        )
        return Plan.parse(response)

    async def _replan(
        self,
        task: str,
        original_plan: Plan,
        results: list[StepResult]
    ) -> list[Step]:
        response = await self.llm.generate(
            prompt=f"""The original plan needs adjustment.

Task: {task}

Original plan:
{original_plan.format()}

Results so far:
{self._format_results(results)}

Create updated remaining steps:"""
        )
        return self._parse_steps(response)
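The example leans on a few containers the snippet does not define. Sketches below; the field names are my guesses from the planning prompt, and Plan.parse is left abstract because it depends entirely on the output format you ask the model for.

from dataclasses import dataclass, field

@dataclass
class Step:
    number: int
    action: str
    expected_outcome: str
    depends_on: list[int] = field(default_factory=list)

@dataclass
class StepResult:
    step: Step
    output: str
    requires_replan: bool = False  # set when reality diverges from the plan

@dataclass
class Plan:
    steps: list[Step]

    @classmethod
    def parse(cls, response: str) -> "Plan":
        raise NotImplementedError  # depends on your plan output format

    def format(self) -> str:
        return "\n".join(
            f"{s.number}. {s.action} (expects: {s.expected_outcome})"
            for s in self.steps
        )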
Tree of Thought
class TreeOfThoughtAgent:
    """Explore multiple reasoning paths."""

    async def solve(
        self,
        problem: str,
        num_branches: int = 3,
        depth: int = 3
    ) -> Solution:
        root = ThoughtNode(thought="Start", children=[])

        # Generate initial thoughts
        initial_thoughts = await self._generate_thoughts(
            problem,
            context="",
            n=num_branches
        )
        for thought in initial_thoughts:
            root.children.append(ThoughtNode(thought=thought, children=[]))

        # Expand the tree level by level
        for _ in range(depth - 1):
            for leaf in self._get_leaves(root):
                # Evaluate whether this branch is worth expanding
                score = await self._evaluate_thought(problem, leaf.path())
                if score < 0.3:  # prune low-scoring branches
                    continue

                # Generate next thoughts
                next_thoughts = await self._generate_thoughts(
                    problem,
                    context=leaf.path(),
                    n=num_branches
                )
                for thought in next_thoughts:
                    leaf.children.append(ThoughtNode(thought=thought, children=[]))

        # Find best path
        best_leaf = await self._find_best_leaf(root, problem)
        return Solution(reasoning=best_leaf.path(), answer=best_leaf.thought)

    async def _generate_thoughts(
        self,
        problem: str,
        context: str,
        n: int
    ) -> list[str]:
        response = await self.llm.generate(
            prompt=f"""Problem: {problem}

Reasoning so far: {context or "None"}

Generate {n} different next steps in reasoning. Each should be a distinct approach.
"""
        )
        return self._parse_thoughts(response)
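ThoughtNode is elided above. One wrinkle: path() needs a route back to the root, so this sketch (mine, not the original's) wires parent pointers; the agent would attach children with add_child, or pass parent=leaf when appending, rather than constructing bare nodes.

from dataclasses import dataclass, field

@dataclass
class ThoughtNode:
    thought: str
    children: list["ThoughtNode"] = field(default_factory=list)
    parent: "ThoughtNode | None" = None

    def add_child(self, thought: str) -> "ThoughtNode":
        child = ThoughtNode(thought=thought, parent=self)
        self.children.append(child)
        return child

    def path(self) -> str:
        # Walk parent pointers to rebuild the root-to-leaf reasoning chain
        chain, node = [], self
        while node is not None:
            chain.append(node.thought)
            node = node.parent
        return " -> ".join(reversed(chain))

def get_leaves(root: ThoughtNode) -> list[ThoughtNode]:
    # Depth-first collection of nodes with no children
    if not root.children:
        return [root]
    return [leaf for child in root.children for leaf in get_leaves(child)]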
Multi-Agent Patterns
Orchestrator Pattern
import asyncio

class OrchestratorAgent:
    """Coordinate specialized sub-agents."""

    def __init__(self, agents: dict[str, Agent]):
        self.agents = agents
        self.llm = get_orchestrator_llm()

    async def run(self, task: str) -> Result:
        # Decompose task
        subtasks = await self._decompose(task)

        # Assign to agents
        assignments = await self._assign_subtasks(subtasks)

        # Execute, in parallel where dependencies allow
        results = {}
        for batch in self._get_parallel_batches(assignments):
            batch_results = await asyncio.gather(*[
                self.agents[a.agent_name].run(a.subtask)
                for a in batch
            ])
            for assignment, result in zip(batch, batch_results):
                results[assignment.subtask_id] = result

        # Synthesize results
        return await self._synthesize(task, results)

    async def _decompose(self, task: str) -> list[Subtask]:
        agent_descriptions = "\n".join(
            f"- {name}: {agent.description}"
            for name, agent in self.agents.items()
        )
        response = await self.llm.generate(
            prompt=f"""Decompose this task into subtasks.

Available agents:
{agent_descriptions}

Task: {task}

List subtasks with:
- Subtask ID
- Description
- Required agent
- Dependencies (other subtask IDs)
"""
        )
        return self._parse_subtasks(response)
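The piece doing the real scheduling work is _get_parallel_batches. A straightforward implementation is a topological wave sort over subtask dependencies; this sketch assumes each assignment exposes subtask_id, dependencies, agent_name, and subtask (my field names, mirroring the decomposition prompt).

def _get_parallel_batches(
    self, assignments: list["Assignment"]
) -> list[list["Assignment"]]:
    """Group assignments into waves whose dependencies are already done,
    so each wave can run concurrently under asyncio.gather."""
    done: set[str] = set()
    remaining = list(assignments)
    batches = []
    while remaining:
        batch = [a for a in remaining if all(d in done for d in a.dependencies)]
        if not batch:
            raise ValueError("Cyclic or unsatisfiable subtask dependencies")
        batches.append(batch)
        done.update(a.subtask_id for a in batch)
        remaining = [a for a in remaining if a not in batch]
    return batches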
Debate Pattern
class DebateAgents:
    """Multiple agents debate to improve answers."""

    async def solve(
        self,
        question: str,
        rounds: int = 3
    ) -> str:
        # Get initial answers from different "perspectives"
        perspectives = ["analytical", "creative", "skeptical"]
        answers = {}
        for p in perspectives:
            answers[p] = await self._generate_answer(question, p)

        # Debate rounds: each perspective critiques the others and revises
        for _ in range(rounds):
            for p in perspectives:
                other_answers = {k: v for k, v in answers.items() if k != p}
                answers[p] = await self._critique_and_revise(
                    question,
                    own_answer=answers[p],
                    other_answers=other_answers,
                    perspective=p
                )

        # Synthesize final answer
        return await self._synthesize_answers(question, answers)

    async def _critique_and_revise(
        self,
        question: str,
        own_answer: str,
        other_answers: dict[str, str],
        perspective: str
    ) -> str:
        return await self.llm.generate(
            prompt=f"""You are a {perspective} thinker.

Question: {question}

Your previous answer: {own_answer}

Other perspectives:
{self._format_other_answers(other_answers)}

Consider their points. Critique your answer and revise if needed.

Revised answer:"""
        )
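Synthesis at the end is one more model call. A plausible sketch; the prompt wording is mine, not a fixed recipe.

async def _synthesize_answers(self, question: str, answers: dict[str, str]) -> str:
    formatted = "\n\n".join(f"[{p}]\n{a}" for p, a in answers.items())
    return await self.llm.generate(
        prompt=f"""Question: {question}

After debate, the perspectives settled on these answers:
{formatted}

Write one final answer that keeps the points all perspectives agree on
and flags any remaining disagreement:"""
    )

Mind the cost: each round makes one call per perspective, so a debate costs rounds × perspectives calls before synthesis. Keep both numbers small.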
Memory Patterns
Working Memory
class WorkingMemoryAgent:
    """Agent with structured working memory."""

    def __init__(self, llm, memory_slots: int = 5):
        self.llm = llm
        self.memory = {}  # slot_name -> content
        self.max_slots = memory_slots

    async def run(self, task: str) -> Result:
        self.memory = {"task": task}
        while not self._is_complete():
            # Decide next action
            action = await self._decide_action()

            if action.type == "think":
                # Store thought in memory
                self._update_memory(action.slot, action.content)
            elif action.type == "tool":
                result = await self._execute_tool(action)
                self._update_memory(f"tool_result_{action.tool}", result)
            elif action.type == "complete":
                return Result(answer=action.content)

            # Compress memory if too large
            if len(self.memory) > self.max_slots:
                await self._compress_memory()

        # Normally we exit via the "complete" action; this is a fallback
        return Result(answer=self.memory.get("summary", ""))

    async def _compress_memory(self):
        """Summarize and compress working memory."""
        summary = await self.llm.generate(
            prompt=f"""Summarize the key information from this working memory.
Keep only what's needed to complete the task.

Current memory:
{self._format_memory()}

Compressed summary:"""
        )
        self.memory = {"summary": summary, "task": self.memory["task"]}
Error Recovery
class ResilientAgent:
    """Agent with sophisticated error recovery."""

    async def run(self, task: str) -> Result:
        attempts = []
        for _ in range(self.max_attempts):
            try:
                return await self._execute(task, previous_attempts=attempts)
            except ToolError as e:
                recovery = await self._plan_recovery(task, e, attempts)
                attempts.append(AttemptRecord(error=e, recovery=recovery))

                if recovery.strategy == "retry_different":
                    continue
                elif recovery.strategy == "decompose":
                    return await self._decompose_and_retry(task, e)
                elif recovery.strategy == "ask_user":
                    return await self._escalate_to_user(task, e)

        return Result(error="Max attempts exceeded", attempts=attempts)

    async def _plan_recovery(
        self,
        task: str,
        error: Exception,
        previous: list[AttemptRecord]
    ) -> RecoveryPlan:
        response = await self.llm.generate(
            prompt=f"""An error occurred while executing a task.

Task: {task}
Error: {error}

Previous attempts:
{self._format_attempts(previous)}

Choose a recovery strategy:
1. retry_different - Try a different approach
2. decompose - Break into smaller subtasks
3. ask_user - Need human input

Strategy and reasoning:"""
        )
        return RecoveryPlan.parse(response)
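AttemptRecord and RecoveryPlan are assumed types. A minimal version follows, with a deliberately tolerant parser, since model output rarely matches a spec exactly.

from dataclasses import dataclass

@dataclass
class AttemptRecord:
    error: Exception
    recovery: "RecoveryPlan"

@dataclass
class RecoveryPlan:
    strategy: str  # "retry_different" | "decompose" | "ask_user"
    reasoning: str

    @classmethod
    def parse(cls, response: str) -> "RecoveryPlan":
        # Take the first known strategy keyword that appears; default to retry
        for strategy in ("retry_different", "decompose", "ask_user"):
            if strategy in response:
                return cls(strategy=strategy, reasoning=response.strip())
        return cls(strategy="retry_different", reasoning=response.strip())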
Key Takeaways
- Simple ReAct agents fail on complex tasks
- Plan-and-execute provides structure and recoverability
- Tree of thought explores multiple reasoning paths
- Orchestrator pattern enables specialization
- Debate improves answer quality through critique
- Working memory prevents context overflow
- Build sophisticated error recovery
- Test agents with adversarial inputs
- Start simple, add complexity as needed
- Agents are systems, not just prompts
Complex tasks need complex agents. Build them carefully.