Refactor: Clean up obsolete files and organize codebase structure
This commit removes deprecated modules and reorganizes code into logical directories:

Deleted files (superseded by newer systems):
- claude_code_server.py (replaced by agent-sdk direct integration)
- heartbeat.py (superseded by scheduled_tasks.py)
- pulse_brain.py (unused in production)
- config/pulse_brain_config.py (obsolete config)

Created directory structure:
- examples/ (7 example files: example_*.py, demo_*.py)
- tests/ (5 test files: test_*.py)

Updated imports:
- agent.py: Removed heartbeat module and all enable_heartbeat logic
- bot_runner.py: Removed heartbeat parameter from Agent initialization
- llm_interface.py: Updated deprecated claude_code_server message

Preserved essential files:
- hooks.py (for future use)
- adapters/skill_integration.py (for future use)
- All Google integration tools (Gmail, Calendar, Contacts)
- GLM provider code (backward compatibility)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
36
tests/test_agent_hybrid.py
Normal file
36
tests/test_agent_hybrid.py
Normal file
@@ -0,0 +1,36 @@
|
||||
"""Test agent with hybrid search."""
|
||||
|
||||
from agent import Agent
|
||||
|
||||
print("Initializing agent with hybrid search...")
|
||||
agent = Agent(provider="claude")
|
||||
|
||||
print("\n" + "="*60)
|
||||
print("TESTING AGENT MEMORY RECALL WITH HYBRID SEARCH")
|
||||
print("="*60)
|
||||
|
||||
# Test 1: Semantic query - ask about cost in different words
|
||||
print("\n1. Testing semantic recall: 'How can I save money on API calls?'")
|
||||
print("-" * 60)
|
||||
response = agent.chat("How can I save money on API calls?", username="alice")
|
||||
print(response)
|
||||
|
||||
# Test 2: Ask about birthday (semantic search should find personal info)
|
||||
print("\n" + "="*60)
|
||||
print("2. Testing semantic recall: 'What's my birthday?'")
|
||||
print("-" * 60)
|
||||
response = agent.chat("What's my birthday?", username="alice")
|
||||
print(response)
|
||||
|
||||
# Test 3: Ask about specific technical detail
|
||||
print("\n" + "="*60)
|
||||
print("3. Testing keyword recall: 'What search technology are we using?'")
|
||||
print("-" * 60)
|
||||
response = agent.chat("What search technology are we using?", username="alice")
|
||||
print(response)
|
||||
|
||||
print("\n" + "="*60)
|
||||
print("Test complete!")
|
||||
print("="*60)
|
||||
|
||||
agent.shutdown()
|
||||
51
tests/test_hybrid_search.py
Normal file
51
tests/test_hybrid_search.py
Normal file
@@ -0,0 +1,51 @@
|
||||
"""Test hybrid search implementation."""
|
||||
|
||||
from memory_system import MemorySystem
|
||||
|
||||
print("Initializing memory system with hybrid search...")
|
||||
memory = MemorySystem()
|
||||
|
||||
print("\nRe-syncing all memories to generate embeddings...")
|
||||
# Force re-index by clearing the database
|
||||
memory.db.execute("DELETE FROM chunks")
|
||||
memory.db.execute("DELETE FROM chunks_fts")
|
||||
memory.db.execute("DELETE FROM files")
|
||||
memory.db.commit()
|
||||
|
||||
# Re-sync to generate embeddings
|
||||
memory.sync()
|
||||
|
||||
print("\n" + "="*60)
|
||||
print("TESTING HYBRID SEARCH")
|
||||
print("="*60)
|
||||
|
||||
# Test 1: Semantic search (should work even with different wording)
|
||||
print("\n1. Testing semantic search for 'when was I born' (looking for birthday):")
|
||||
results = memory.search_hybrid("when was I born", max_results=3)
|
||||
for i, result in enumerate(results, 1):
|
||||
print(f"\n Result {i} (score: {result['score']:.3f}):")
|
||||
print(f" {result['path']}:{result['start_line']}-{result['end_line']}")
|
||||
print(f" {result['snippet'][:100]}...")
|
||||
|
||||
# Test 2: Technical keyword search
|
||||
print("\n2. Testing keyword search for 'SQLite FTS5':")
|
||||
results = memory.search_hybrid("SQLite FTS5", max_results=3)
|
||||
for i, result in enumerate(results, 1):
|
||||
print(f"\n Result {i} (score: {result['score']:.3f}):")
|
||||
print(f" {result['path']}:{result['start_line']}-{result['end_line']}")
|
||||
print(f" {result['snippet'][:100]}...")
|
||||
|
||||
# Test 3: Conceptual search
|
||||
print("\n3. Testing conceptual search for 'cost optimization':")
|
||||
results = memory.search_hybrid("cost optimization", max_results=3)
|
||||
for i, result in enumerate(results, 1):
|
||||
print(f"\n Result {i} (score: {result['score']:.3f}):")
|
||||
print(f" {result['path']}:{result['start_line']}-{result['end_line']}")
|
||||
print(f" {result['snippet'][:100]}...")
|
||||
|
||||
print("\n" + "="*60)
|
||||
print(f"Vector index size: {len(memory.vector_index)} embeddings")
|
||||
print("="*60)
|
||||
|
||||
memory.close()
|
||||
print("\nTest complete!")
|
||||
209
tests/test_installation.py
Normal file
209
tests/test_installation.py
Normal file
@@ -0,0 +1,209 @@
|
||||
"""
|
||||
Installation verification script for Windows 11.
|
||||
Tests all core components without making API calls.
|
||||
"""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def test_python_version() -> bool:
    """Check that the running interpreter is Python 3.8 or newer.

    Returns:
        True when the version requirement is met, False otherwise.
    """
    version = sys.version_info
    # Compare as a tuple: the previous `major >= 3 and minor >= 8`
    # would wrongly reject e.g. Python 4.0 (minor 0 < 8).
    if (version.major, version.minor) >= (3, 8):
        print(f" Python {version.major}.{version.minor}.{version.micro}")
        return True
    print(f" [FAIL] Python {version.major}.{version.minor} is too old")
    print(" Please install Python 3.8 or higher")
    return False
|
||||
|
||||
|
||||
def test_imports() -> bool:
    """Import every dependency and report which ones are present.

    Required modules decide the return value; optional ones are
    reported but never fail the check.

    Returns:
        True when all required third-party modules import cleanly.
    """

    def probe(module_name: str) -> bool:
        # Presence is all we care about; discard the module object.
        try:
            __import__(module_name)
            return True
        except ImportError:
            return False

    required = [
        ("anthropic", "Anthropic SDK"),
        ("requests", "Requests"),
        ("watchdog", "Watchdog"),
    ]
    optional = [
        ("slack_bolt", "Slack Bolt (for Slack adapter)"),
        ("telegram", "python-telegram-bot (for Telegram adapter)"),
        ("yaml", "PyYAML"),
    ]

    all_ok = True

    print("\nRequired modules:")
    for module_name, display_name in required:
        if probe(module_name):
            print(f" {display_name}")
        else:
            print(f" [FAIL] {display_name} not installed")
            all_ok = False

    print("\nOptional modules:")
    for module_name, display_name in optional:
        if probe(module_name):
            print(f" {display_name}")
        else:
            print(f" [SKIP] {display_name} (optional)")

    return all_ok
|
||||
|
||||
|
||||
def test_core_modules() -> bool:
    """Test core ajarbot modules can be imported.

    NOTE: pulse_brain and heartbeat were deleted in this refactor
    (heartbeat superseded by scheduled_tasks), so they are no longer
    part of the checked set.

    Returns:
        True when every core module imports without error.
    """
    core_modules = [
        "agent",
        "memory_system",
        "llm_interface",
        "scheduled_tasks",
        "hooks",
    ]

    all_ok = True
    print("\nCore modules:")
    for module_name in core_modules:
        try:
            __import__(module_name)
            print(f" {module_name}.py")
        except Exception as e:
            # Broad catch on purpose: a core module failing for ANY
            # reason (syntax error, missing dep) should fail this check.
            print(f" [FAIL] {module_name}.py: {e}")
            all_ok = False

    return all_ok
|
||||
|
||||
|
||||
def test_file_structure() -> bool:
    """Check required files and directories exist.

    NOTE: pulse_brain.py was deleted in this refactor and is no
    longer a required file.

    Returns:
        True when every required path is present in the CWD.
    """
    required_paths = [
        ("agent.py", "file"),
        ("memory_system.py", "file"),
        ("llm_interface.py", "file"),
        ("bot_runner.py", "file"),
        ("requirements.txt", "file"),
        ("adapters", "dir"),
        ("config", "dir"),
        ("docs", "dir"),
    ]

    all_ok = True
    print("\nProject structure:")
    for path_str, path_type in required_paths:
        path = Path(path_str)
        # Check with the matching predicate so a file named like a
        # required directory (or vice versa) still fails.
        exists = path.is_file() if path_type == "file" else path.is_dir()

        if exists:
            print(f" {path_str}")
        else:
            print(f" [FAIL] {path_str} not found")
            all_ok = False

    return all_ok
|
||||
|
||||
|
||||
def test_environment() -> bool:
    """Report on relevant environment variables.

    Informational only: missing keys produce warnings, never a
    failure, so this always returns True.
    """
    import os

    print("\nEnvironment variables:")

    anthropic_key = os.getenv("ANTHROPIC_API_KEY")
    if not anthropic_key:
        print(" [WARN] ANTHROPIC_API_KEY not set")
        print(" You'll need to set this before running examples")
    else:
        # Show only a prefix/suffix so the key never lands in logs.
        masked = anthropic_key[:10] + "..." + anthropic_key[-4:]
        print(f" ANTHROPIC_API_KEY: {masked}")

    if os.getenv("GLM_API_KEY"):
        print(" GLM_API_KEY: set (optional)")
    else:
        print(" [INFO] GLM_API_KEY not set (optional)")

    return True
|
||||
|
||||
|
||||
def test_memory_workspace() -> bool:
    """Report on the on-disk memory workspace.

    Purely informational; always returns True (the workspace is
    created lazily on first run).
    """
    workspace = Path("memory_workspace")

    print("\nMemory workspace:")
    if not workspace.exists():
        print(" [INFO] memory_workspace/ will be created on first run")
        return True

    print(" memory_workspace/ exists")

    db_file = workspace / "memory.db"
    if db_file.exists():
        size_mb = db_file.stat().st_size / 1024 / 1024
        print(f" Database size: {size_mb:.2f} MB")

    return True
|
||||
|
||||
|
||||
def main() -> None:
    """Run all installation checks and print a pass/fail summary."""
    print("=" * 60)
    print("Ajarbot Installation Verification")
    print("=" * 60)

    tests = [
        ("Python version", test_python_version),
        ("Dependencies", test_imports),
        ("Core modules", test_core_modules),
        ("File structure", test_file_structure),
        ("Environment", test_environment),
        ("Memory workspace", test_memory_workspace),
    ]

    results = {}
    for test_name, test_func in tests:
        print(f"\n[TEST] {test_name}")
        try:
            results[test_name] = test_func()
        except Exception as e:
            # A crashing check counts as a failure instead of aborting
            # the whole verification run.
            print(f" [ERROR] {e}")
            results[test_name] = False

    print("\n" + "=" * 60)
    print("Summary")
    print("=" * 60)

    passed = sum(1 for result in results.values() if result)
    total = len(results)

    for test_name, result in results.items():
        status = "PASS" if result else "FAIL"
        print(f" [{status}] {test_name}")

    print(f"\nPassed: {passed}/{total}")

    if passed == total:
        print("\nAll tests passed!")
        print("\nNext steps:")
        print(" 1. Set ANTHROPIC_API_KEY if not already set")
        # Example scripts were moved into examples/ by the repo reorg.
        print(" 2. Run: python examples/example_usage.py")
        print(" 3. See docs/WINDOWS_DEPLOYMENT.md for more options")
    else:
        print("\nSome tests failed. Please:")
        print(" 1. Ensure Python 3.8+ is installed")
        print(" 2. Run: pip install -r requirements.txt")
        print(" 3. Check you're in the correct directory")
        print(" 4. See docs/WINDOWS_DEPLOYMENT.md for help")

    print("\n" + "=" * 60)


if __name__ == "__main__":
    main()
|
||||
210
tests/test_scheduler.py
Normal file
210
tests/test_scheduler.py
Normal file
@@ -0,0 +1,210 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Test the TaskScheduler system."""
|
||||
|
||||
import sys
|
||||
import traceback
|
||||
from datetime import datetime
|
||||
|
||||
from agent import Agent
|
||||
from scheduled_tasks import TaskScheduler
|
||||
|
||||
|
||||
def test_schedule_calculation() -> bool:
    """Test schedule time calculations.

    Prints the computed next-run time for a set of representative
    schedule specs; always returns True (inspection-style test).
    """
    print("=" * 60)
    print("Testing Schedule Calculations")
    print("=" * 60)

    # enable_heartbeat was removed from Agent in this refactor
    # (heartbeat superseded by scheduled_tasks), so it is not passed.
    agent = Agent(
        provider="claude",
        workspace_dir="./memory_workspace",
    )
    scheduler = TaskScheduler(
        agent, config_file="config/scheduled_tasks.yaml",
    )

    test_schedules = [
        "hourly",
        "daily 08:00",
        "daily 18:00",
        "weekly mon 09:00",
        "weekly fri 17:00",
    ]

    now = datetime.now()
    print(
        f"\nCurrent time: "
        f"{now.strftime('%Y-%m-%d %H:%M:%S %A')}\n"
    )

    for schedule in test_schedules:
        try:
            next_run = scheduler._calculate_next_run(schedule)
            time_until = next_run - now
            hours_until = time_until.total_seconds() / 3600

            formatted = next_run.strftime("%Y-%m-%d %H:%M %A")
            print(f"{schedule:20} -> {formatted}")
            print(
                f"{'':20} (in {hours_until:.1f} hours)"
            )
        except Exception as e:
            # A bad spec should be reported, not abort the loop.
            print(f"{schedule:20} -> ERROR: {e}")

    return True
|
||||
|
||||
|
||||
def test_task_loading() -> bool:
    """Test loading tasks from config.

    Returns:
        True when at least one task was loaded from the YAML config.
    """
    print("\n" + "=" * 60)
    print("Testing Task Loading")
    print("=" * 60)

    # enable_heartbeat was removed from Agent in this refactor
    # (heartbeat superseded by scheduled_tasks), so it is not passed.
    agent = Agent(
        provider="claude",
        workspace_dir="./memory_workspace",
    )
    scheduler = TaskScheduler(
        agent, config_file="config/scheduled_tasks.yaml",
    )

    tasks = scheduler.list_tasks()

    print(f"\nLoaded {len(tasks)} task(s):\n")

    for i, task in enumerate(tasks, 1):
        print(f"{i}. {task['name']}")
        print(f" Schedule: {task['schedule']}")
        print(f" Enabled: {task['enabled']}")
        print(f" Next run: {task['next_run']}")
        if task["send_to"]:
            print(f" Send to: {task['send_to']}")
        print()

    return len(tasks) > 0
|
||||
|
||||
|
||||
def test_manual_execution() -> bool:
    """Test manual task execution.

    WARNING: actually runs a task, which consumes API tokens.

    Returns:
        True when a task was executed, False if none are configured.
    """
    print("=" * 60)
    print("Testing Manual Task Execution")
    print("=" * 60)

    # enable_heartbeat was removed from Agent in this refactor
    # (heartbeat superseded by scheduled_tasks), so it is not passed.
    agent = Agent(
        provider="claude",
        workspace_dir="./memory_workspace",
    )
    scheduler = TaskScheduler(
        agent, config_file="config/scheduled_tasks.yaml",
    )

    if not scheduler.tasks:
        print(
            "\nNo tasks configured. "
            "Create tasks in config/scheduled_tasks.yaml"
        )
        return False

    # Prefer the first enabled task; fall back to the first task at all.
    test_task = next(
        (t for t in scheduler.tasks if t.enabled),
        scheduler.tasks[0],
    )

    print(f"\nManually executing task: {test_task.name}")
    print(f"Prompt: {test_task.prompt[:100]}...")
    print("\nExecuting...\n")

    result = scheduler.run_task_now(test_task.name)
    print(f"\nResult: {result}")

    return True
|
||||
|
||||
|
||||
def test_scheduler_status() -> bool:
    """Test scheduler status reporting.

    Prints the scheduler's configuration/counters; always returns True.
    """
    print("\n" + "=" * 60)
    print("Testing Scheduler Status")
    print("=" * 60)

    # enable_heartbeat was removed from Agent in this refactor
    # (heartbeat superseded by scheduled_tasks), so it is not passed.
    agent = Agent(
        provider="claude",
        workspace_dir="./memory_workspace",
    )
    scheduler = TaskScheduler(
        agent, config_file="config/scheduled_tasks.yaml",
    )

    print(f"\nScheduler running: {scheduler.running}")
    print(f"Config file: {scheduler.config_file}")
    print(f"Tasks loaded: {len(scheduler.tasks)}")
    print(f"Adapters registered: {len(scheduler.adapters)}")

    enabled_count = sum(
        1 for t in scheduler.tasks if t.enabled
    )
    print(
        f"Enabled tasks: "
        f"{enabled_count}/{len(scheduler.tasks)}"
    )

    return True
|
||||
|
||||
|
||||
def main() -> bool:
    """Run all scheduler tests and print a summary.

    Returns:
        True when every test passed.
    """
    print("\nTaskScheduler Test Suite\n")

    tests = [
        ("Schedule Calculation", test_schedule_calculation),
        ("Task Loading", test_task_loading),
        ("Scheduler Status", test_scheduler_status),
        # Commented out - uses API tokens:
        # ("Manual Execution", test_manual_execution),
    ]

    results = []

    for test_name, test_func in tests:
        # Run defensively so one crashing test doesn't abort the suite.
        try:
            result = test_func()
            results.append((test_name, result))
        except Exception as e:
            print(f"\nERROR in {test_name}: {e}")
            traceback.print_exc()
            results.append((test_name, False))

    print("\n" + "=" * 60)
    print("Test Summary")
    print("=" * 60)

    for test_name, passed in results:
        status = "+ PASS" if passed else "x FAIL"
        print(f" {status}: {test_name}")

    passed_count = sum(1 for _, p in results if p)
    total_count = len(results)

    print(f"\n{passed_count}/{total_count} tests passed")

    if passed_count == total_count:
        print("\nAll tests passed! Scheduler is ready to use.")
        print("\nNext steps:")
        print(" 1. Edit config/scheduled_tasks.yaml")
        print(" 2. Set enabled: true for tasks you want")
        print(" 3. Add your channel IDs")
        # Example scripts were moved into examples/ by the repo reorg.
        print(
            " 4. Run: python examples/example_bot_with_scheduler.py"
        )
    else:
        print("\nSome tests failed. Check the output above.")

    return passed_count == total_count


if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)
|
||||
188
tests/test_skills.py
Normal file
188
tests/test_skills.py
Normal file
@@ -0,0 +1,188 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Test script to verify local skills are properly set up."""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
from adapters.base import InboundMessage, MessageType
|
||||
from adapters.skill_integration import SkillInvoker
|
||||
|
||||
|
||||
def test_skill_discovery() -> bool:
    """Verify that at least one skill is discovered on disk.

    Returns:
        True when the invoker finds one or more skills.
    """
    print("=" * 60)
    print("Testing Skill Discovery")
    print("=" * 60)

    discovered = SkillInvoker().list_available_skills()

    print(f"\nFound {len(discovered)} skill(s):")
    for name in discovered:
        print(f" + {name}")

    if not discovered:
        print(" ! No skills found!")
        print(
            " Create skills in: "
            ".claude/skills/<skill-name>/SKILL.md"
        )

    return len(discovered) > 0
|
||||
|
||||
|
||||
def test_skill_info() -> bool:
    """Print metadata for every discovered skill.

    Informational only; always returns True even when a skill's
    metadata cannot be loaded.
    """
    print("\n" + "=" * 60)
    print("Testing Skill Info")
    print("=" * 60)

    invoker = SkillInvoker()

    for skill in invoker.list_available_skills():
        info = invoker.get_skill_info(skill)
        print(f"\n/{skill}:")

        if not info:
            print(" ! Could not load skill info")
            continue

        # Fixed display order for the known metadata keys.
        for label, key in (
            ("Name", "name"),
            ("Description", "description"),
            ("User-invocable", "user-invocable"),
            ("Disable auto-invoke", "disable-model-invocation"),
            ("Allowed tools", "allowed-tools"),
            ("Context", "context"),
            ("Agent", "agent"),
            ("Path", "path"),
        ):
            print(f" {label}: {info.get(key, 'N/A')}")

        preview = info.get("body", "")[:100]
        print(f" Instructions preview: {preview}...")

    return True
|
||||
|
||||
|
||||
def test_skill_structure() -> bool:
    """Inspect the .claude/skills/ layout and report each skill's files.

    Returns:
        False when the skills directory is missing, True otherwise.
    """
    print("\n" + "=" * 60)
    print("Testing Skill Structure")
    print("=" * 60)

    skills_dir = Path(".claude/skills")

    if not skills_dir.exists():
        print(
            " ! Skills directory not found: "
            ".claude/skills/"
        )
        return False

    print(f"\nSkills directory: {skills_dir.absolute()}")

    for entry in skills_dir.iterdir():
        if not entry.is_dir():
            continue

        manifest = entry / "SKILL.md"
        examples = entry / "examples"

        print(f"\n {entry.name}/")
        # SKILL.md is mandatory; the examples/ directory is optional.
        print(f" SKILL.md: {'+' if manifest.exists() else 'x'}")
        print(f" examples/: {'+' if examples.exists() else '-'} (optional)")

        if examples.exists():
            for example_file in examples.glob("*.md"):
                print(f" - {example_file.name}")

    return True
|
||||
|
||||
|
||||
def test_preprocessor() -> bool:
    """Exercise the slash-command parsing path end to end.

    Builds a synthetic inbound message, parses the /skill prefix and
    checks the named skill actually exists.
    """
    print("\n" + "=" * 60)
    print("Testing Skill Preprocessor")
    print("=" * 60)

    message = InboundMessage(
        platform="test",
        user_id="test123",
        username="testuser",
        text="/adapter-dev create WhatsApp adapter",
        channel_id="test-channel",
        thread_id=None,
        reply_to_id=None,
        message_type=MessageType.TEXT,
        metadata={},
        raw=None,
    )

    print(f"\nTest message: {message.text}")

    # Only slash-prefixed messages are skill invocations.
    if not message.text.startswith("/"):
        return False

    pieces = message.text.split(maxsplit=1)
    skill_name = pieces[0][1:]
    args = pieces[1] if len(pieces) > 1 else ""

    print(f" Detected skill: {skill_name}")
    print(f" Arguments: {args}")

    if skill_name not in SkillInvoker().list_available_skills():
        print(" ! Skill not found")
        return False

    print(" + Skill exists and can be invoked")
    return True
|
||||
|
||||
|
||||
def main() -> bool:
    """Run all skill tests and print a summary.

    Returns:
        True when every test passed.
    """
    print("\nAjarbot Skills Test Suite\n")

    tests = [
        ("Skill Discovery", test_skill_discovery),
        ("Skill Info", test_skill_info),
        ("Skill Structure", test_skill_structure),
        ("Skill Preprocessor", test_preprocessor),
    ]

    results = []
    for test_name, test_func in tests:
        # Run defensively so one crashing test doesn't abort the suite
        # (matches the behavior of tests/test_scheduler.py).
        try:
            results.append((test_name, test_func()))
        except Exception as e:
            print(f"\nERROR in {test_name}: {e}")
            results.append((test_name, False))

    print("\n" + "=" * 60)
    print("Test Summary")
    print("=" * 60)

    for test_name, passed in results:
        status = "+ PASS" if passed else "x FAIL"
        print(f" {status}: {test_name}")

    passed_count = sum(1 for _, p in results if p)
    total_count = len(results)

    print(f"\n{passed_count}/{total_count} tests passed")

    if passed_count == total_count:
        print("\nAll tests passed! Skills are ready to use.")
        print("\nNext steps:")
        print(" 1. Try invoking a skill: /adapter-dev")
        # Example scripts were moved into examples/ by the repo reorg.
        print(
            " 2. Test in bot: "
            "python examples/example_bot_with_skills.py"
        )
        print(" 3. Create your own skills in: .claude/skills/")
    else:
        print("\nSome tests failed. Check the output above.")

    return passed_count == total_count


if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)
|
||||
Reference in New Issue
Block a user