#!/usr/bin/env python3
"""
Claw Code Runtime - Layer 2 of Archon Architecture

This harness contains all business logic, orchestrating between
the thin Hermes profile (Layer 1) and Gemma intelligence (Layer 3).
"""

import sys
import json
from typing import Dict, Any, Optional, List
from pathlib import Path

# Add parent to path for imports
# NOTE(review): mutates sys.path at import time so the `runtime` package
# resolves when this file runs as a script — confirm acceptable if this
# module is ever imported as a library.
sys.path.insert(0, str(Path(__file__).parent.parent))

from runtime.tool_registry import ToolRegistry
class ClawHarness:
    """
    The Claw Runtime - where all logic lives.

    Responsibilities:
    - Message routing and orchestration
    - Tool execution management
    - Context window management
    - Conversation state tracking
    """

    def __init__(self, ollama_host: str = "http://localhost:11434",
                 model: str = "gemma3:4b"):
        """
        Args:
            ollama_host: Base URL of the Ollama server.
            model: Ollama model tag used by the intelligence layer. Previously
                hard-coded in two separate places (the generate call and the
                response metadata); parameterized so they cannot drift apart.
        """
        self.ollama_host = ollama_host
        self.model = model
        self.tools = ToolRegistry()
        self.conversation_history: List[Dict[str, Any]] = []
        # Reserved for session tracking; nothing in this file assigns it yet.
        self.session_id: Optional[str] = None

    def process_message(self, message: str, context: Optional[Dict] = None) -> Dict[str, Any]:
        """
        Main entry point for processing messages.

        Flow:
        1. Parse intent
        2. Determine if tools needed
        3. Route to Gemma or execute tools
        4. Return formatted response

        Args:
            message: Raw user message.
            context: Optional extra context forwarded to prompt building.

        Returns:
            Response envelope dict as produced by ``_format_response``.
        """
        context = context or {}

        # Log to conversation history
        self.conversation_history.append({
            "role": "user",
            "content": message,
            "timestamp": self._get_timestamp()
        })

        # Tool invocations short-circuit the LLM round trip entirely.
        tool_call = self.tools.parse_tool_call(message)
        if tool_call:
            result = self._execute_tool(tool_call)
            return self._format_response(
                content=result,
                tool_used=tool_call["name"],
                metadata={"tool_result": True}
            )

        # Route to intelligence layer (Gemma)
        return self._route_to_intelligence(message, context)

    def _route_to_intelligence(self, message: str, context: Dict) -> Dict[str, Any]:
        """Send the message to the configured model via Ollama and log the reply."""
        # Imported lazily so tool-only flows never need the Ollama client.
        from ollama_client import OllamaClient

        client = OllamaClient(base_url=self.ollama_host)

        # Build prompt with recent-history context
        prompt = self._build_prompt(message, context)

        # Get response from the model
        response = client.generate(
            model=self.model,
            prompt=prompt,
            system=self._get_system_prompt()
        )

        # Log response
        self.conversation_history.append({
            "role": "assistant",
            "content": response.get("response", ""),
            "timestamp": self._get_timestamp()
        })

        return self._format_response(
            content=response.get("response", ""),
            metadata={
                "model": self.model,
                "tokens_used": response.get("eval_count", 0)
            }
        )

    def _execute_tool(self, tool_call: Dict) -> str:
        """Execute a parsed tool call and return its string result."""
        return self.tools.execute(tool_call)

    def _build_prompt(self, message: str, context: Dict) -> str:
        """
        Build a context-aware prompt for the model from recent history.

        NOTE(review): ``context`` is currently unused here — fold it into the
        prompt or drop the parameter. Also, ``process_message`` appends the
        current message to history *before* calling this, so it appears both
        in the history section and on the "User:" line; left unchanged to
        preserve the exact prompts the model has been seeing.
        """
        history = "\n".join(
            f"{msg['role']}: {msg['content']}"
            for msg in self.conversation_history[-5:]  # Last 5 messages
        )

        return f"""Previous conversation:
{history}

User: {message}

Assistant:"""

    def _get_system_prompt(self) -> str:
        """Get the system prompt sent with every generate call."""
        return """You are the Archon POC, a helpful AI assistant.
Be concise but thorough. Tag your response with #archon-poc."""

    def _format_response(self, content: str, tool_used: Optional[str] = None,
                         metadata: Optional[Dict] = None) -> Dict[str, Any]:
        """
        Format a response envelope for return to Layer 1.

        Args:
            content: Text payload of the reply.
            tool_used: Name of the tool that produced ``content``, if any.
            metadata: Extra details (model name, token counts, tool flags).

        Returns:
            Dict with ``content``/``status``/``layer``/``tag`` always present;
            ``tool_used`` and ``metadata`` included only when truthy.
        """
        response = {
            "content": content,
            "status": "success",
            "layer": "claw_runtime",
            "tag": "#archon-poc"
        }

        if tool_used:
            response["tool_used"] = tool_used
        if metadata:
            response["metadata"] = metadata

        return response

    def _get_timestamp(self) -> str:
        """Current local time as a naive ISO-8601 string (no timezone info)."""
        from datetime import datetime
        return datetime.now().isoformat()
|
||
|
|
|
||
|
|
|
||
|
|
def main():
    """CLI entry point: one-shot mode when args are given, otherwise a REPL."""
    harness = ClawHarness()

    args = sys.argv[1:]
    if args:
        # One-shot: treat every CLI argument as one message, dump the raw envelope.
        reply = harness.process_message(" ".join(args))
        print(json.dumps(reply, indent=2))
        return

    # Interactive mode
    print("Archon Harness - Interactive Mode")
    print("Type 'exit' to quit\n")

    while True:
        try:
            line = input("> ")
            if line.lower() in ("exit", "quit"):
                break
            reply = harness.process_message(line)
            print(f"\n{reply['content']}\n")
        except (KeyboardInterrupt, EOFError):
            # Ctrl-C / Ctrl-D end the session the same way 'exit' does.
            break

    print("\nGoodbye!")


if __name__ == "__main__":
    main()
|