# ajarbot/agent.py
"""AI Agent with Memory and LLM Integration."""
from typing import List, Optional
from heartbeat import Heartbeat
from hooks import HooksSystem
from llm_interface import LLMInterface
from memory_system import MemorySystem
from tools import TOOL_DEFINITIONS, execute_tool
# Maximum number of recent messages to include in LLM context
MAX_CONTEXT_MESSAGES = 3 # Reduced from 5 to save tokens
# Maximum characters of agent response to store in memory
MEMORY_RESPONSE_PREVIEW_LENGTH = 200


class Agent:
    """AI Agent with memory, LLM, heartbeat, and hooks."""

    def __init__(
        self,
        provider: str = "claude",
        workspace_dir: str = "./memory_workspace",
        enable_heartbeat: bool = False,
    ) -> None:
        self.memory = MemorySystem(workspace_dir)
        self.llm = LLMInterface(provider)
        self.hooks = HooksSystem()
        self.conversation_history: List[dict] = []
        self.memory.sync()
        self.hooks.trigger("agent", "startup", {"workspace_dir": workspace_dir})
        self.heartbeat: Optional[Heartbeat] = None
        if enable_heartbeat:
            self.heartbeat = Heartbeat(self.memory, self.llm)
            self.heartbeat.on_alert = self._on_heartbeat_alert
            self.heartbeat.start()

    def _on_heartbeat_alert(self, message: str) -> None:
        """Handle heartbeat alerts."""
        print(f"\nHeartbeat Alert:\n{message}\n")

    def chat(self, user_message: str, username: str = "default") -> str:
        """Chat with context from memory and tool use."""
        # Handle model switching commands
        if user_message.lower().startswith("/model "):
            model_name = user_message[7:].strip()
            self.llm.set_model(model_name)
            return f"Switched to model: {model_name}"
        elif user_message.lower() == "/sonnet":
            self.llm.set_model("claude-sonnet-4-5-20250929")
            return "Switched to Claude Sonnet 4.5 (more capable, higher cost)"
        elif user_message.lower() == "/haiku":
            self.llm.set_model("claude-haiku-4-5-20251001")
            return "Switched to Claude Haiku 4.5 (faster, cheaper)"
        elif user_message.lower() == "/status":
            current_model = self.llm.model
            is_sonnet = "sonnet" in current_model.lower()
            cache_status = "enabled" if is_sonnet else "disabled (Haiku active)"
            return (
                f"Current model: {current_model}\n"
                f"Prompt caching: {cache_status}\n"
                f"Context messages: {MAX_CONTEXT_MESSAGES}\n"
                "Memory results: 2\n\n"
                "Commands: /sonnet, /haiku, /status"
            )
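        # Ground the reply: the system prompt combines the agent persona
        # ("soul"), the per-user profile, and up to two memory-search hits
        # for this message.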
        soul = self.memory.get_soul()
        user_profile = self.memory.get_user(username)
        relevant_memory = self.memory.search(user_message, max_results=2)
        memory_lines = [f"- {mem['snippet']}" for mem in relevant_memory]
        system = (
            f"{soul}\n\nUser Profile:\n{user_profile}\n\n"
            "Relevant Memory:\n" + "\n".join(memory_lines) +
            "\n\nYou have access to tools for file operations and command execution. "
            "Use them freely to help the user."
        )
        self.conversation_history.append(
            {"role": "user", "content": user_message}
        )
        # Tool execution loop
        max_iterations = 5  # Reduced from 10 to save costs
        # Enable caching for Sonnet to save 90% on repeated system prompts
        use_caching = "sonnet" in self.llm.model.lower()
        # Truncate the context window once, before the loop: re-slicing on
        # every iteration could cut between a tool_use and its tool_result,
        # which the API rejects. Tool exchanges below are appended to both
        # this window and the full history.
        context = self.conversation_history[-MAX_CONTEXT_MESSAGES:]
        for iteration in range(max_iterations):
            response = self.llm.chat_with_tools(
                context,
                tools=TOOL_DEFINITIONS,
                system=system,
                use_cache=use_caching,
            )
            # Check stop reason
            if response.stop_reason == "end_turn":
                # Extract text response
                text_content = []
                for block in response.content:
                    if block.type == "text":
                        text_content.append(block.text)
                final_response = "\n".join(text_content)
                self.conversation_history.append(
                    {"role": "assistant", "content": final_response}
                )
                preview = final_response[:MEMORY_RESPONSE_PREVIEW_LENGTH]
                if len(final_response) > MEMORY_RESPONSE_PREVIEW_LENGTH:
                    preview += "..."  # Mark the preview as truncated
                self.memory.write_memory(
                    f"**User ({username})**: {user_message}\n"
                    f"**Agent**: {preview}",
                    daily=True,
                )
                return final_response
            elif response.stop_reason == "tool_use":
                # Build assistant message with tool uses
                assistant_content = []
                tool_uses = []
                for block in response.content:
                    if block.type == "text":
                        assistant_content.append({
                            "type": "text",
                            "text": block.text,
                        })
                    elif block.type == "tool_use":
                        assistant_content.append({
                            "type": "tool_use",
                            "id": block.id,
                            "name": block.name,
                            "input": block.input,
                        })
                        tool_uses.append(block)
                assistant_message = {
                    "role": "assistant",
                    "content": assistant_content,
                }
                self.conversation_history.append(assistant_message)
                context.append(assistant_message)
                # Execute tools and build tool result message
                tool_results = []
                for tool_use in tool_uses:
                    result = execute_tool(tool_use.name, tool_use.input)
                    print(f"[Tool] {tool_use.name}: {result[:100]}...")
                    tool_results.append({
                        "type": "tool_result",
                        "tool_use_id": tool_use.id,
                        "content": result,
                    })
                result_message = {"role": "user", "content": tool_results}
                self.conversation_history.append(result_message)
                context.append(result_message)
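                # Next iteration: the model sees these tool results and can
                # call more tools or finish with end_turn.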
            else:
                # Unexpected stop reason (e.g. "max_tokens" when the reply
                # was truncated); surface it rather than failing silently.
                return f"Unexpected stop reason: {response.stop_reason}"
return "Error: Maximum tool use iterations exceeded"

    def switch_model(self, provider: str) -> None:
        """Switch LLM provider."""
        self.llm = LLMInterface(provider)
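        # Point the running heartbeat (if any) at the new LLM instance too.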
        if self.heartbeat:
            self.heartbeat.llm = self.llm

    def shutdown(self) -> None:
        """Cleanup and stop background services."""
        if self.heartbeat:
            self.heartbeat.stop()
        self.memory.close()
        self.hooks.trigger("agent", "shutdown", {})


if __name__ == "__main__":
    agent = Agent(provider="claude")
    response = agent.chat("What's my current project?", username="alice")
    print(response)
    agent.shutdown()  # Stop the heartbeat and flush memory/hooks
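
# Interactive usage sketch (an assumption, not part of this module: chat()
# is synchronous, and "/quit" is a hypothetical exit command handled by the
# caller, not by chat() itself):
#
#     agent = Agent(provider="claude", enable_heartbeat=True)
#     try:
#         while True:
#             line = input("you> ")
#             if line.strip() == "/quit":
#                 break
#             print(agent.chat(line, username="alice"))
#     finally:
#         agent.shutdown()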