Features:
- Multi-platform bot (Slack, Telegram)
- Memory system with SQLite FTS
- Tool-use capabilities (file ops, commands)
- Scheduled tasks system
- Dynamic model switching (/sonnet, /haiku)
- Prompt caching for cost optimization

Optimizations:
- Default to Haiku 4.5 (12x cheaper)
- Reduced context: 3 messages, 2 memory results
- Optimized SOUL.md (48% smaller)
- Automatic caching when using Sonnet (90% savings)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
119 lines
3.5 KiB
Python
"""LLM Interface - Claude API, GLM, and other models."""

import os
from typing import Any, Dict, List, Optional

import requests
from anthropic import Anthropic
from anthropic.types import Message

# API key environment variable names by provider
_API_KEY_ENV_VARS = {
    "claude": "ANTHROPIC_API_KEY",
    "glm": "GLM_API_KEY",
}

# Default models by provider
_DEFAULT_MODELS = {
    "claude": "claude-haiku-4-5-20251001",  # 12x cheaper than Sonnet!
    "glm": "glm-4-plus",
}

_GLM_BASE_URL = "https://open.bigmodel.cn/api/paas/v4/chat/completions"


class LLMInterface:
    """Simple LLM interface supporting Claude and GLM."""

    def __init__(
        self,
        provider: str = "claude",
        api_key: Optional[str] = None,
    ) -> None:
        self.provider = provider
        self.api_key = api_key or os.getenv(
            _API_KEY_ENV_VARS.get(provider, ""),
        )
        self.model = _DEFAULT_MODELS.get(provider, "")
        self.client: Optional[Anthropic] = None

        if provider == "claude":
            self.client = Anthropic(api_key=self.api_key)

    def chat(
        self,
        messages: List[Dict],
        system: Optional[str] = None,
        max_tokens: int = 4096,
    ) -> str:
        """Send chat request and get response."""
        if self.provider == "claude":
            response = self.client.messages.create(
                model=self.model,
                max_tokens=max_tokens,
                system=system or "",
                messages=messages,
            )
            return response.content[0].text

        if self.provider == "glm":
            payload = {
                "model": self.model,
                "messages": [
                    {"role": "system", "content": system or ""},
                ] + messages,
                "max_tokens": max_tokens,
            }
            headers = {"Authorization": f"Bearer {self.api_key}"}
            response = requests.post(
                _GLM_BASE_URL, json=payload, headers=headers, timeout=60,
            )
            # Surface HTTP errors here instead of a confusing KeyError below
            response.raise_for_status()
            return response.json()["choices"][0]["message"]["content"]

        raise ValueError(f"Unsupported provider: {self.provider}")

    def chat_with_tools(
        self,
        messages: List[Dict],
        tools: List[Dict[str, Any]],
        system: Optional[str] = None,
        max_tokens: int = 4096,
        use_cache: bool = False,
    ) -> Message:
        """Send chat request with tool support. Returns the full Message object.

        Args:
            use_cache: Enable prompt caching for Sonnet models (saves 90%
                on repeated context).
        """
        if self.provider != "claude":
            raise ValueError("Tool use only supported for Claude provider")

        # Enable caching only for Sonnet models (not worth it for Haiku)
        enable_caching = use_cache and "sonnet" in self.model.lower()

        # Structure system prompt for optimal caching
        if enable_caching and system:
            # Convert string to list format with cache control
            system_blocks = [
                {
                    "type": "text",
                    "text": system,
                    "cache_control": {"type": "ephemeral"},
                }
            ]
        else:
            system_blocks = system or ""

        response = self.client.messages.create(
            model=self.model,
            max_tokens=max_tokens,
            system=system_blocks,
            messages=messages,
            tools=tools,
        )
        return response
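
    # Sketch of consuming the returned Message (illustrative, not part of the
    # original module; block attributes follow the anthropic SDK's content
    # types, and run_tool is a hypothetical dispatcher):
    #
    #     response = llm.chat_with_tools(messages, tools, use_cache=True)
    #     for block in response.content:
    #         if block.type == "text":
    #             print(block.text)
    #         elif block.type == "tool_use":
    #             result = run_tool(block.name, block.input)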

    def set_model(self, model: str) -> None:
        """Change the active model."""
        self.model = model
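

# Minimal demo sketch (not in the original file): constructs the interface
# and switches models the way the /sonnet and /haiku commands presumably do.
# Assumes ANTHROPIC_API_KEY is set; the Sonnet model ID is an assumption.
if __name__ == "__main__":
    llm = LLMInterface(provider="claude")
    print(f"Default model: {llm.model}")

    # Switch to Sonnet for a harder task, then back to the cheap default.
    llm.set_model("claude-sonnet-4-5")  # hypothetical model ID
    print(f"Active model: {llm.model}")

    llm.set_model(_DEFAULT_MODELS["claude"])
    print(f"Back to: {llm.model}")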