Initial Archon Kion implementation
- Complete daemon with FastAPI - Ollama client for local AI (gemma3:4b) - Telegram webhook handler - Hermes bridge (thin profile) - Systemd service definition - All unit tests passing
This commit is contained in:
79
README.md
Normal file
79
README.md
Normal file
@@ -0,0 +1,79 @@
|
||||
# Archon Kion
|
||||
|
||||
Local AI assistant daemon with Hermes integration. Processes Telegram and Gitea webhooks, routes queries to local Ollama instance.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌─────────────┐ ┌─────────────┐ ┌─────────────┐
|
||||
│ Telegram │────▶│ Archon Kion │────▶│ Ollama │
|
||||
│ Webhooks │ │ Daemon │ │ localhost │
|
||||
└─────────────┘ └──────┬──────┘ └─────────────┘
|
||||
│
|
||||
┌──────┴──────┐
|
||||
│ Hermes │
|
||||
│ Profile │
|
||||
└─────────────┘
|
||||
```
|
||||
|
||||
## Components
|
||||
|
||||
- **src/main.py**: Daemon entry point, FastAPI web server
|
||||
- **src/ollama_client.py**: Ollama API client
|
||||
- **src/telegram_bot.py**: Telegram webhook handler
|
||||
- **src/hermes_bridge.py**: Hermes profile integration
|
||||
- **config/archon-kion.yaml**: Configuration file
|
||||
- **hermes-profile/profile.yaml**: Thin Hermes profile
|
||||
- **systemd/archon-kion.service**: Systemd service definition
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
# Clone repository
|
||||
git clone http://143.198.27.163:3000/ezra/archon-kion.git
|
||||
cd archon-kion
|
||||
|
||||
# Install dependencies
|
||||
pip install -r requirements.txt
|
||||
|
||||
# Configure
|
||||
${EDITOR:-vi} config/archon-kion.yaml
|
||||
|
||||
# Run
|
||||
cd src && python main.py
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
Edit `config/archon-kion.yaml`:
|
||||
|
||||
```yaml
|
||||
ollama:
|
||||
host: localhost
|
||||
port: 11434
|
||||
model: gemma3:4b
|
||||
|
||||
telegram:
|
||||
webhook_url: https://your-domain.com/webhook
|
||||
token: ${TELEGRAM_BOT_TOKEN}
|
||||
|
||||
hermes:
|
||||
profile_path: ./hermes-profile/profile.yaml
|
||||
```
|
||||
|
||||
## Commands
|
||||
|
||||
- `/status` - Check daemon and Ollama status
- `/memory` - Show conversation memory
- `/clear` - Clear conversation memory
- `/query <text>` - Send query to Ollama
- `/models` - List available Ollama models
- `/help` - Show available commands
|
||||
|
||||
## Testing
|
||||
|
||||
```bash
|
||||
cd tests
|
||||
python -m pytest test_archon.py -v
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MIT - Hermes Project
|
||||
23
config/archon-kion.yaml
Normal file
23
config/archon-kion.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
# Archon Kion Configuration
|
||||
|
||||
ollama:
|
||||
host: localhost
|
||||
port: 11434
|
||||
model: gemma3:4b
|
||||
|
||||
telegram:
|
||||
# Get token from @BotFather on Telegram, or set the TELEGRAM_BOT_TOKEN env var
|
||||
token: ${TELEGRAM_BOT_TOKEN}
|
||||
webhook_url: ${TELEGRAM_WEBHOOK_URL:-http://localhost:8080/webhook/telegram}
|
||||
|
||||
hermes:
|
||||
profile_path: ./hermes-profile/profile.yaml
|
||||
|
||||
logging:
|
||||
level: INFO
|
||||
format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
||||
|
||||
# Memory settings
|
||||
memory:
|
||||
max_messages: 20 # Keep last 10 exchanges
|
||||
persist: false # Don't persist to disk (privacy)
|
||||
33
hermes-profile/profile.yaml
Normal file
33
hermes-profile/profile.yaml
Normal file
@@ -0,0 +1,33 @@
|
||||
# Hermes Profile: Archon Kion
|
||||
# THIN profile - identity, constraints, routing only
|
||||
# NO reasoning logic - all intelligence in runtime layer
|
||||
|
||||
identity:
|
||||
name: "Archon Kion"
|
||||
role: "Local AI Assistant"
|
||||
description: "Runs entirely on local Ollama instance"
|
||||
instructions:
|
||||
- "Be helpful, concise, and accurate."
|
||||
- "You are part of the Hermes system of autonomous agents."
|
||||
- "Always prefer local tools and resources over external APIs."
|
||||
|
||||
constraints:
|
||||
local_only: true
|
||||
model: gemma3:4b
|
||||
max_tokens: 4096
|
||||
temperature: 0.7
|
||||
allowed_channels:
|
||||
- telegram
|
||||
- gitea_webhooks
|
||||
|
||||
routing:
|
||||
tag: "#archon-kion"
|
||||
priority: 1
|
||||
filters:
|
||||
- "direct_message"
|
||||
- "tag_mention"
|
||||
|
||||
capabilities:
|
||||
- text_generation
|
||||
- conversation_memory
|
||||
- command_processing
|
||||
7
requirements.txt
Normal file
7
requirements.txt
Normal file
@@ -0,0 +1,7 @@
|
||||
fastapi>=0.104.0
|
||||
uvicorn>=0.24.0
|
||||
pydantic>=2.5.0
|
||||
httpx>=0.25.0
|
||||
pyyaml>=6.0.1
|
||||
pytest>=7.4.0
|
||||
pytest-asyncio>=0.21.0
|
||||
BIN
src/__pycache__/hermes_bridge.cpython-312.pyc
Normal file
BIN
src/__pycache__/hermes_bridge.cpython-312.pyc
Normal file
Binary file not shown.
BIN
src/__pycache__/ollama_client.cpython-312.pyc
Normal file
BIN
src/__pycache__/ollama_client.cpython-312.pyc
Normal file
Binary file not shown.
BIN
src/__pycache__/telegram_bot.cpython-312.pyc
Normal file
BIN
src/__pycache__/telegram_bot.cpython-312.pyc
Normal file
Binary file not shown.
91
src/hermes_bridge.py
Normal file
91
src/hermes_bridge.py
Normal file
@@ -0,0 +1,91 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Hermes Bridge
|
||||
Thin integration with Hermes profile system
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Optional, Dict, Any
|
||||
|
||||
import yaml
|
||||
|
||||
logger = logging.getLogger("archon-kion.hermes")
|
||||
|
||||
|
||||
class HermesBridge:
    """Bridge to the Hermes profile system.

    Deliberately THIN: it only loads identity/constraints/routing data from a
    YAML profile and exposes read-only accessors. No reasoning logic lives
    here — all intelligence belongs to the runtime layer.
    """

    def __init__(self, profile_path: str = "../hermes-profile/profile.yaml"):
        self.profile_path = profile_path
        self.profile: Dict[str, Any] = {}
        self._load_profile()

    def _load_profile(self):
        """Load the Hermes profile from YAML, falling back to built-in defaults."""
        try:
            with open(self.profile_path, 'r') as fh:
                loaded = yaml.safe_load(fh)
            self.profile = loaded or {}
            profile_name = self.profile.get('identity', {}).get('name', 'unknown')
            logger.info(f"Loaded Hermes profile: {profile_name}")
        except FileNotFoundError:
            logger.warning(f"Profile not found at {self.profile_path}, using defaults")
            self.profile = self._default_profile()
        except Exception as e:
            # Malformed YAML etc. — never crash the daemon over a profile.
            logger.error(f"Failed to load profile: {e}")
            self.profile = self._default_profile()

    def _default_profile(self) -> Dict[str, Any]:
        """Built-in fallback profile used when no file can be read."""
        return {
            "identity": {
                "name": "Archon Kion",
                "role": "Local AI Assistant"
            },
            "constraints": {
                "local_only": True,
                "model": "gemma3:4b"
            },
            "routing": {
                "tag": "#archon-kion"
            }
        }

    def get_system_prompt(self) -> str:
        """Assemble the system prompt from the profile's identity and constraints."""
        identity = self.profile.get('identity', {})
        constraints = self.profile.get('constraints', {})

        who = identity.get('name', 'Archon Kion')
        what = identity.get('role', 'Local AI Assistant')

        lines = [
            f"You are {who}, {what}.",
            "You run entirely locally via Ollama.",
            "You are part of the Hermes system.",
        ]

        if constraints.get('local_only'):
            lines.append("You operate without internet access, using only local resources.")

        # The profile may carry additional free-form instructions.
        extra = identity.get('instructions', [])
        if extra:
            lines.extend(extra)

        return "\n".join(lines)

    def get_identity(self) -> Dict[str, Any]:
        """Return the identity section of the profile."""
        return self.profile.get('identity', {})

    def get_constraints(self) -> Dict[str, Any]:
        """Return the constraints section of the profile."""
        return self.profile.get('constraints', {})

    def get_routing_tag(self) -> str:
        """Return the routing tag (defaults to '#archon-kion')."""
        return self.profile.get('routing', {}).get('tag', '#archon-kion')

    def should_handle(self, message: str) -> bool:
        """True when the message carries the routing tag or is a bot command."""
        return self.get_routing_tag() in message or message.startswith('/')
|
||||
188
src/main.py
Normal file
188
src/main.py
Normal file
@@ -0,0 +1,188 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Archon Kion - Daemon entry point
|
||||
Local AI assistant with Hermes integration
|
||||
"""
|
||||
|
||||
import asyncio
import logging
import os
import re
import sys
from contextlib import asynccontextmanager
from typing import Optional

import yaml
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse

from ollama_client import OllamaClient
from telegram_bot import TelegramBot
from hermes_bridge import HermesBridge
|
||||
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
||||
)
|
||||
logger = logging.getLogger("archon-kion")
|
||||
|
||||
class ArchonKion:
    """Main daemon class orchestrating all components.

    Owns the Ollama client, the Telegram bot, the Hermes bridge and the
    in-memory conversation store, and wires them together at startup.
    """

    # Default config location, relative to src/ (the daemon's working dir).
    DEFAULT_CONFIG_PATH = "../config/archon-kion.yaml"

    def __init__(self, config_path: Optional[str] = None):
        # Fix: honor the CONFIG_PATH environment variable (set by __main__
        # and by the systemd unit) — previously it was written but never read.
        if config_path is None:
            config_path = os.environ.get('CONFIG_PATH', self.DEFAULT_CONFIG_PATH)
        self.config = self._load_config(config_path)
        self.ollama: Optional[OllamaClient] = None
        self.telegram: Optional[TelegramBot] = None
        self.hermes: Optional[HermesBridge] = None
        # chat_id (str) -> chronological list of {"role": ..., "content": ...}
        self.memory: dict = {}

    @staticmethod
    def _substitute_env(content: str) -> str:
        """Expand ${VAR} and ${VAR:-default} references using os.environ.

        Unset variables without a default are left verbatim (matching the
        previous behavior). The ${VAR:-default} form is new: the shipped
        config uses it for webhook_url, but the old loop-based substitution
        left the literal placeholder in the parsed config.
        """
        pattern = re.compile(r'\$\{([A-Za-z_][A-Za-z0-9_]*)(?::-([^}]*))?\}')

        def _replace(match):
            var, default = match.group(1), match.group(2)
            if var in os.environ:
                return os.environ[var]
            if default is not None:
                return default
            return match.group(0)  # leave unresolved references untouched

        return pattern.sub(_replace, content)

    def _load_config(self, path: str) -> dict:
        """Load YAML configuration with environment-variable substitution.

        Raises:
            FileNotFoundError: if the config file does not exist.
            yaml.YAMLError: if the file is not valid YAML.
        """
        with open(path, 'r') as f:
            content = f.read()
        return yaml.safe_load(self._substitute_env(content))

    async def initialize(self):
        """Initialize all components and verify the Ollama connection."""
        logger.info("Initializing Archon Kion...")

        # Initialize Ollama client
        ollama_cfg = self.config.get('ollama', {})
        self.ollama = OllamaClient(
            host=ollama_cfg.get('host', 'localhost'),
            port=ollama_cfg.get('port', 11434),
            model=ollama_cfg.get('model', 'gemma3:4b')
        )

        # Initialize Hermes bridge
        hermes_cfg = self.config.get('hermes', {})
        self.hermes = HermesBridge(
            profile_path=hermes_cfg.get('profile_path', '../hermes-profile/profile.yaml')
        )

        # Initialize Telegram bot (shares the daemon-owned memory dict)
        telegram_cfg = self.config.get('telegram', {})
        self.telegram = TelegramBot(
            token=telegram_cfg.get('token', ''),
            webhook_url=telegram_cfg.get('webhook_url', ''),
            ollama_client=self.ollama,
            hermes_bridge=self.hermes,
            memory=self.memory
        )

        # Non-fatal: the daemon still starts when Ollama is down, and the
        # /health endpoint reports the degraded state.
        if await self.ollama.health_check():
            logger.info("✓ Ollama connection established")
        else:
            logger.warning("✗ Ollama not available")

        logger.info("Archon Kion initialized")

    async def shutdown(self):
        """Graceful shutdown: release all owned HTTP clients."""
        logger.info("Shutting down Archon Kion...")
        if self.ollama:
            await self.ollama.close()
        # Fix: the Telegram bot owns its own httpx client; close it too
        # instead of leaking the connection pool.
        if self.telegram:
            await self.telegram.close()
        logger.info("Archon Kion stopped")
|
||||
|
||||
|
||||
# Global daemon instance
|
||||
archon: Optional[ArchonKion] = None
|
||||
|
||||
|
||||
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: build and start the daemon, tear it down on exit."""
    global archon
    daemon = ArchonKion()
    archon = daemon
    await daemon.initialize()
    yield
    await daemon.shutdown()
|
||||
|
||||
|
||||
app = FastAPI(
|
||||
title="Archon Kion",
|
||||
description="Local AI assistant daemon",
|
||||
version="1.0.0",
|
||||
lifespan=lifespan
|
||||
)
|
||||
|
||||
|
||||
@app.get("/health")
async def health_check():
    """Health probe: 503 before init, otherwise healthy/degraded per Ollama."""
    if archon is None or archon.ollama is None:
        return JSONResponse(
            status_code=503,
            content={"status": "unhealthy", "reason": "not_initialized"}
        )

    ollama_up = await archon.ollama.health_check()
    return {
        "status": "healthy" if ollama_up else "degraded",
        "ollama": "connected" if ollama_up else "disconnected",
        "model": archon.ollama.model if ollama_up else None
    }
|
||||
|
||||
|
||||
@app.post("/webhook/telegram")
async def telegram_webhook(request: Request):
    """Receive a Telegram update and dispatch it to the bot handler."""
    if not (archon and archon.telegram):
        raise HTTPException(status_code=503, detail="Service not initialized")

    update = await request.json()
    result = await archon.telegram.handle_update(update)
    # handle_update returns None for updates we ignore; still ACK them.
    return result or {"ok": True}
|
||||
|
||||
|
||||
@app.post("/webhook/gitea")
async def gitea_webhook(request: Request):
    """Receive a Gitea event; currently just logged and acknowledged."""
    if archon is None:
        raise HTTPException(status_code=503, detail="Service not initialized")

    # Body must be consumed even though the payload is not processed yet.
    payload = await request.json()
    event_type = request.headers.get('X-Gitea-Event', 'unknown')
    logger.info(f"Received Gitea webhook: {event_type}")

    # Process Gitea events
    return {"ok": True, "event": event_type}
|
||||
|
||||
|
||||
@app.get("/memory/{chat_id}")
async def get_memory(chat_id: str):
    """Return the stored conversation history for one chat."""
    if archon is None:
        raise HTTPException(status_code=503, detail="Service not initialized")

    history = archon.memory.get(chat_id, [])
    return {"chat_id": chat_id, "messages": history}
|
||||
|
||||
|
||||
@app.delete("/memory/{chat_id}")
async def clear_memory(chat_id: str):
    """Reset the stored conversation history for one chat (idempotent)."""
    if archon is None:
        raise HTTPException(status_code=503, detail="Service not initialized")

    if chat_id in archon.memory:
        archon.memory[chat_id] = []
    return {"ok": True, "chat_id": chat_id}
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import uvicorn

    # Optional CLI argument overrides the config path; exported via the
    # environment so the worker process started by uvicorn can see it.
    cli_config = sys.argv[1] if len(sys.argv) > 1 else "../config/archon-kion.yaml"
    os.environ['CONFIG_PATH'] = cli_config

    uvicorn.run(
        "main:app",
        host="0.0.0.0",
        port=8080,
        reload=False,
        log_level="info",
    )
|
||||
131
src/ollama_client.py
Normal file
131
src/ollama_client.py
Normal file
@@ -0,0 +1,131 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Ollama API Client
|
||||
Handles communication with local Ollama instance
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import AsyncGenerator, Optional, List, Dict, Any
|
||||
|
||||
import httpx
|
||||
|
||||
logger = logging.getLogger("archon-kion.ollama")
|
||||
|
||||
|
||||
class OllamaClient:
    """Async client for a local Ollama instance's /api endpoints.

    Wraps an httpx.AsyncClient; callers must await close() when done.
    """

    def __init__(
        self,
        host: str = "localhost",
        port: int = 11434,
        model: str = "gemma3:4b",
        temperature: float = 0.7,
        num_ctx: int = 4096,
    ):
        """Create a client.

        Args:
            host: Ollama host (default localhost).
            port: Ollama port (default 11434).
            model: model tag used for every request.
            temperature: sampling temperature. Previously hard-coded at 0.7
                inside generate(); now configurable (default unchanged) so the
                Hermes profile's constraints can be honored by callers.
            num_ctx: context window size. Previously hard-coded at 4096.
        """
        self.host = host
        self.port = port
        self.model = model
        self.temperature = temperature
        self.num_ctx = num_ctx
        self.base_url = f"http://{host}:{port}/api"
        self.client = httpx.AsyncClient(timeout=60.0)

    async def health_check(self) -> bool:
        """Return True if Ollama responds and serves self.model.

        Accepts both an exact tag match and a substring match (e.g. model
        "gemma3" matches an installed "gemma3:4b"). Never raises: any
        transport or parse failure counts as unhealthy.
        """
        try:
            response = await self.client.get(f"{self.base_url}/tags")
            if response.status_code == 200:
                models = response.json().get('models', [])
                available = [m['name'] for m in models]
                logger.debug(f"Available models: {available}")
                return self.model in available or any(self.model in m for m in available)
            return False
        except Exception as e:
            logger.warning(f"Ollama health check failed: {e}")
            return False

    async def generate(
        self,
        prompt: str,
        system: Optional[str] = None,
        context: Optional[List[Dict[str, str]]] = None,
        stream: bool = False
    ) -> AsyncGenerator[str, None]:
        """Yield response text for *prompt*.

        Streaming mode yields one chunk per response line; non-streaming mode
        yields a single chunk. Errors are yielded as "Error: ..." strings
        rather than raised, so callers always receive text.

        Args:
            prompt: the user message.
            system: optional system prompt, prepended to the messages.
            context: optional prior messages ({"role", "content"} dicts).
            stream: request server-side streaming.
        """
        messages = []

        if system:
            messages.append({"role": "system", "content": system})

        if context:
            messages.extend(context)

        messages.append({"role": "user", "content": prompt})

        payload = {
            "model": self.model,
            "messages": messages,
            "stream": stream,
            "options": {
                # Previously hard-coded; now taken from the constructor.
                "temperature": self.temperature,
                "num_ctx": self.num_ctx
            }
        }

        try:
            if stream:
                async with self.client.stream(
                    "POST",
                    f"{self.base_url}/chat",
                    json=payload
                ) as response:
                    async for line in response.aiter_lines():
                        if line:
                            try:
                                data = json.loads(line)
                                if 'message' in data and 'content' in data['message']:
                                    yield data['message']['content']
                                if data.get('done', False):
                                    break
                            except json.JSONDecodeError:
                                # Skip keep-alive / partial lines.
                                continue
            else:
                response = await self.client.post(
                    f"{self.base_url}/chat",
                    json=payload
                )
                response.raise_for_status()
                data = response.json()

                if 'message' in data and 'content' in data['message']:
                    yield data['message']['content']
                else:
                    yield "Error: Unexpected response format"

        except httpx.HTTPError as e:
            logger.error(f"Ollama HTTP error: {e}")
            yield f"Error: Failed to connect to Ollama ({e})"
        except Exception as e:
            logger.error(f"Ollama error: {e}")
            yield f"Error: {str(e)}"

    async def generate_sync(
        self,
        prompt: str,
        system: Optional[str] = None,
        context: Optional[List[Dict[str, str]]] = None
    ) -> str:
        """Non-streaming convenience wrapper: collect generate() into one string."""
        result = []
        async for chunk in self.generate(prompt, system, context, stream=False):
            result.append(chunk)
        return ''.join(result)

    async def list_models(self) -> List[str]:
        """Return installed model tags, or [] on any failure."""
        try:
            response = await self.client.get(f"{self.base_url}/tags")
            response.raise_for_status()
            data = response.json()
            return [m['name'] for m in data.get('models', [])]
        except Exception as e:
            logger.error(f"Failed to list models: {e}")
            return []

    async def close(self):
        """Close the underlying HTTP client."""
        await self.client.aclose()
|
||||
240
src/telegram_bot.py
Normal file
240
src/telegram_bot.py
Normal file
@@ -0,0 +1,240 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Telegram Bot Handler
|
||||
Processes Telegram webhooks and commands
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Optional, Dict, Any, List
|
||||
|
||||
import httpx
|
||||
|
||||
logger = logging.getLogger("archon-kion.telegram")
|
||||
|
||||
|
||||
class TelegramBot:
    """Telegram bot integration.

    Receives webhook updates (via main.py's /webhook/telegram endpoint),
    dispatches slash-commands, and routes ordinary messages through the
    Ollama client using the Hermes profile's system prompt. Conversation
    history lives in the daemon-owned ``memory`` dict shared at construction.
    """

    def __init__(
        self,
        token: str,
        webhook_url: str,
        ollama_client: 'OllamaClient',
        hermes_bridge: 'HermesBridge',
        memory: Dict[str, List[Dict[str, str]]]
    ):
        # memory maps str(chat_id) -> chronological {"role", "content"} dicts;
        # shared with (and owned by) the ArchonKion daemon.
        self.token = token
        self.webhook_url = webhook_url
        self.ollama = ollama_client
        self.hermes = hermes_bridge
        self.memory = memory
        self.api_url = f"https://api.telegram.org/bot{token}"
        self.http = httpx.AsyncClient(timeout=30.0)

    async def handle_update(self, update: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Process one incoming Telegram update.

        Returns a result dict from the send call, or None when the update is
        ignored (no message, or a message without chat id / text).
        """
        logger.debug(f"Received update: {update}")

        # Only plain message updates are handled; edits, callbacks etc. are ignored.
        if 'message' not in update:
            return None

        message = update['message']
        chat_id = message.get('chat', {}).get('id')
        text = message.get('text', '')
        user = message.get('from', {})
        user_id = user.get('id')

        if not chat_id or not text:
            return None

        chat_id_str = str(chat_id)

        # Initialize memory for this chat
        if chat_id_str not in self.memory:
            self.memory[chat_id_str] = []

        # Process commands
        if text.startswith('/'):
            return await self._handle_command(chat_id, text, user_id)

        # Process regular message through Ollama
        return await self._handle_message(chat_id, text, chat_id_str)

    async def _handle_command(self, chat_id: int, text: str, user_id: Optional[int]) -> Dict[str, Any]:
        """Handle bot commands.

        user_id is accepted but currently unused — presumably reserved for
        per-user access control; confirm before removing.
        """
        parts = text.split()
        command = parts[0].lower()
        args = ' '.join(parts[1:]) if len(parts) > 1 else ''

        chat_id_str = str(chat_id)

        if command == '/status':
            return await self._send_message(
                chat_id,
                await self._get_status_message()
            )

        elif command == '/memory':
            return await self._send_message(
                chat_id,
                self._get_memory_status(chat_id_str)
            )

        elif command == '/clear':
            # Idempotent: replying "cleared" even if nothing was stored.
            if chat_id_str in self.memory:
                self.memory[chat_id_str] = []
            return await self._send_message(chat_id, "🧹 Memory cleared.")

        elif command == '/query':
            if not args:
                return await self._send_message(
                    chat_id,
                    "Usage: /query <your question>"
                )
            return await self._handle_message(chat_id, args, chat_id_str)

        elif command == '/help':
            return await self._send_message(
                chat_id,
                self._get_help_message()
            )

        elif command == '/models':
            models = await self.ollama.list_models()
            model_list = '\n'.join(f'• {m}' for m in models) if models else 'No models found'
            return await self._send_message(
                chat_id,
                f"📦 Available Models:\n{model_list}"
            )

        else:
            return await self._send_message(
                chat_id,
                f"Unknown command: {command}\nUse /help for available commands."
            )

    async def _handle_message(self, chat_id: int, text: str, chat_id_str: str) -> Dict[str, Any]:
        """Process a message through Ollama and reply with the generated text.

        Order matters here: the "typing" indicator goes out first, the memory
        is only updated after a response has been fully generated, and the
        history is trimmed before sending the reply.
        """
        # Send "typing" indicator
        await self._send_chat_action(chat_id, 'typing')

        # Get system prompt from Hermes profile
        system_prompt = self.hermes.get_system_prompt()

        # Get conversation context
        context = self.memory.get(chat_id_str, [])

        # Generate response (non-streaming: the generator yields one chunk,
        # or an "Error: ..." string on failure).
        response_text = ""
        async for chunk in self.ollama.generate(
            prompt=text,
            system=system_prompt,
            context=context,
            stream=False
        ):
            response_text += chunk

        # Update memory
        self.memory[chat_id_str].append({"role": "user", "content": text})
        self.memory[chat_id_str].append({"role": "assistant", "content": response_text})

        # Trim memory to last 20 messages (10 exchanges)
        if len(self.memory[chat_id_str]) > 20:
            self.memory[chat_id_str] = self.memory[chat_id_str][-20:]

        return await self._send_message(chat_id, response_text)

    async def _send_message(self, chat_id: int, text: str) -> Dict[str, Any]:
        """Send a message via the Telegram sendMessage API.

        Returns {"ok": True} on success, {"ok": False, "error": ...} on failure.
        NOTE(review): parse_mode=Markdown makes Telegram reject messages with
        unbalanced markdown characters in model output — confirm and consider
        a plain-text fallback.
        """
        try:
            response = await self.http.post(
                f"{self.api_url}/sendMessage",
                json={
                    "chat_id": chat_id,
                    "text": text,
                    "parse_mode": "Markdown"
                }
            )
            response.raise_for_status()
            return {"ok": True}
        except Exception as e:
            logger.error(f"Failed to send message: {e}")
            return {"ok": False, "error": str(e)}

    async def _send_chat_action(self, chat_id: int, action: str):
        """Send a chat action (typing, etc.); failures are logged, not raised."""
        try:
            await self.http.post(
                f"{self.api_url}/sendChatAction",
                json={"chat_id": chat_id, "action": action}
            )
        except Exception as e:
            logger.warning(f"Failed to send chat action: {e}")

    async def _get_status_message(self) -> str:
        """Generate the /status reply (Markdown-formatted)."""
        ollama_ok = await self.ollama.health_check()
        identity = self.hermes.get_identity()

        status = "✅ Online" if ollama_ok else "❌ Ollama unavailable"

        return (
            f"🤖 *{identity.get('name', 'Archon Kion')}*\n"
            f"Status: {status}\n"
            f"Model: `{self.ollama.model}`\n"
            f"Tag: `{self.hermes.get_routing_tag()}`\n"
            f"Local-only: {self.hermes.get_constraints().get('local_only', True)}"
        )

    def _get_memory_status(self, chat_id_str: str) -> str:
        """Generate the /memory reply for one chat."""
        messages = self.memory.get(chat_id_str, [])
        user_msgs = sum(1 for m in messages if m.get('role') == 'user')

        return (
            f"🧠 *Memory Status*\n"
            f"Messages stored: {len(messages)}\n"
            f"User messages: {user_msgs}\n"
            f"Context depth: {len(messages) // 2} exchanges"
        )

    def _get_help_message(self) -> str:
        """Generate the /help reply listing all supported commands."""
        identity = self.hermes.get_identity()

        return (
            f"🤖 *{identity.get('name', 'Archon Kion')}* - Commands:\n\n"
            f"/status - Check daemon status\n"
            f"/memory - Show conversation memory\n"
            f"/clear - Clear conversation memory\n"
            f"/query <text> - Send query to Ollama\n"
            f"/models - List available models\n"
            f"/help - Show this help\n\n"
            f"Or just send a message to chat!"
        )

    async def set_webhook(self) -> bool:
        """Register self.webhook_url with Telegram; returns success as bool.

        NOTE(review): nothing in this file calls set_webhook — confirm the
        webhook is registered out-of-band or wire this into initialization.
        """
        if not self.webhook_url:
            logger.warning("No webhook URL configured")
            return False

        try:
            response = await self.http.post(
                f"{self.api_url}/setWebhook",
                json={"url": self.webhook_url}
            )
            data = response.json()
            if data.get('ok'):
                logger.info(f"Webhook set: {self.webhook_url}")
                return True
            else:
                logger.error(f"Failed to set webhook: {data}")
                return False
        except Exception as e:
            logger.error(f"Failed to set webhook: {e}")
            return False

    async def close(self):
        """Close the underlying HTTP client."""
        await self.http.aclose()
|
||||
33
systemd/archon-kion.service
Normal file
33
systemd/archon-kion.service
Normal file
@@ -0,0 +1,33 @@
|
||||
[Unit]
|
||||
Description=Archon Kion - Local AI Assistant Daemon
|
||||
After=network.target ollama.service
|
||||
Wants=ollama.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=archon
|
||||
Group=archon
|
||||
WorkingDirectory=/opt/archon-kion/src
|
||||
Environment="PYTHONPATH=/opt/archon-kion/src"
|
||||
Environment="CONFIG_PATH=/opt/archon-kion/config/archon-kion.yaml"
|
||||
EnvironmentFile=-/opt/archon-kion/.env
|
||||
|
||||
ExecStart=/usr/bin/python3 /opt/archon-kion/src/main.py
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
# Security hardening
|
||||
NoNewPrivileges=true
|
||||
PrivateTmp=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths=/var/log/archon-kion
|
||||
|
||||
# Resource limits
|
||||
LimitAS=1G
|
||||
LimitRSS=512M
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
1
tests/__init__.py
Normal file
1
tests/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# Tests package
|
||||
BIN
tests/__pycache__/__init__.cpython-312.pyc
Normal file
BIN
tests/__pycache__/__init__.cpython-312.pyc
Normal file
Binary file not shown.
BIN
tests/__pycache__/test_archon.cpython-312-pytest-9.0.2.pyc
Normal file
BIN
tests/__pycache__/test_archon.cpython-312-pytest-9.0.2.pyc
Normal file
Binary file not shown.
260
tests/test_archon.py
Normal file
260
tests/test_archon.py
Normal file
@@ -0,0 +1,260 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Unit tests for Archon Kion
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
import yaml
|
||||
|
||||
# Add src to path
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))
|
||||
|
||||
from ollama_client import OllamaClient
|
||||
from hermes_bridge import HermesBridge
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Fixtures
|
||||
# ============================================================================
|
||||
|
||||
@pytest.fixture
def temp_profile():
    """Yield a path to a throwaway Hermes profile YAML, removed afterwards."""
    profile = {
        'identity': {'name': 'Test Kion', 'role': 'Test Assistant'},
        'constraints': {'local_only': True, 'model': 'test-model'},
        'routing': {'tag': '#test-archon'},
    }
    with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as tmp:
        yaml.dump(profile, tmp)
        path = tmp.name
    yield path
    os.unlink(path)
|
||||
|
||||
|
||||
@pytest.fixture
def temp_config():
    """Yield a path to a throwaway daemon config YAML, removed afterwards."""
    config = {
        'ollama': {'host': 'localhost', 'port': 11434, 'model': 'gemma3:4b'},
        'telegram': {'token': 'test-token', 'webhook_url': 'http://test/webhook'},
        'hermes': {'profile_path': '/tmp/test-profile.yaml'},
    }
    with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as tmp:
        yaml.dump(config, tmp)
        path = tmp.name
    yield path
    os.unlink(path)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Ollama Client Tests
|
||||
# ============================================================================
|
||||
|
||||
@pytest.mark.asyncio
async def test_ollama_client_initialization():
    """Constructor stores connection settings and derives the API base URL."""
    client = OllamaClient(host="localhost", port=11434, model="gemma3:4b")
    assert (client.host, client.port, client.model) == ("localhost", 11434, "gemma3:4b")
    assert client.base_url == "http://localhost:11434/api"
    await client.close()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_ollama_health_check():
    """health_check returns a bool whether or not Ollama is reachable."""
    client = OllamaClient()
    # Outcome depends on the environment; only the type is pinned here.
    reachable = await client.health_check()
    assert isinstance(reachable, bool)
    await client.close()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_ollama_generate_sync():
    """generate_sync yields non-empty text (only exercised when Ollama is up)."""
    client = OllamaClient()

    if await client.health_check():
        reply = await client.generate_sync("Say 'test' only.")
        assert isinstance(reply, str)
        assert reply != ""

    await client.close()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_ollama_list_models():
    """list_models always returns a list; a live Ollama must include gemma."""
    client = OllamaClient()

    models = await client.list_models()
    assert isinstance(models, list)

    # With a reachable Ollama the configured gemma model should be installed.
    if await client.health_check():
        assert models
        assert any('gemma' in name for name in models)

    await client.close()
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Hermes Bridge Tests
|
||||
# ============================================================================
|
||||
|
||||
def test_hermes_bridge_initialization(temp_profile):
    """HermesBridge loads identity, constraints and routing from the profile.

    Fix: this test awaited nothing, so the async def + @pytest.mark.asyncio
    wrapping was dead weight; it is now a plain sync test like the other
    Hermes tests in this file.
    """
    bridge = HermesBridge(profile_path=temp_profile)

    identity = bridge.get_identity()
    assert identity['name'] == 'Test Kion'
    assert identity['role'] == 'Test Assistant'

    constraints = bridge.get_constraints()
    assert constraints['local_only'] is True

    assert bridge.get_routing_tag() == '#test-archon'
|
||||
|
||||
|
||||
def test_hermes_system_prompt(temp_profile):
    """The generated system prompt embeds the profile's name, role and
    locality constraint."""
    bridge = HermesBridge(profile_path=temp_profile)
    prompt = bridge.get_system_prompt()

    # Every identity fragment from the test profile should surface verbatim.
    for fragment in ('Test Kion', 'Test Assistant'):
        assert fragment in prompt
    # Locality may appear in any casing.
    assert 'local' in prompt.lower()
def test_hermes_should_handle(temp_profile):
    """Routing logic: commands and tagged messages are handled, plain
    conversation is not."""
    bridge = HermesBridge(profile_path=temp_profile)

    cases = [
        ('/status', True),               # slash commands
        ('Hello #test-archon', True),    # messages carrying the routing tag
        ('Hello world', False),          # ordinary chatter is ignored
    ]
    for message, expected in cases:
        assert bridge.should_handle(message) is expected
def test_hermes_default_profile():
    """A missing profile file falls back to built-in defaults."""
    bridge = HermesBridge(profile_path='/nonexistent/path.yaml')

    identity = bridge.get_identity()
    # The fallback identity still carries the canonical daemon name.
    assert 'name' in identity
    assert identity.get('name') == 'Archon Kion'
# ============================================================================
# Integration Tests
# ============================================================================
@pytest.mark.asyncio
async def test_full_pipeline():
    """Integration test: full generation pipeline (requires Ollama)."""
    client = OllamaClient()
    try:
        # Skip rather than fail on machines without a local Ollama.
        if not await client.health_check():
            pytest.skip("Ollama not available")

        # Test generation pipeline end to end with a deterministic prompt.
        response = await client.generate_sync(
            prompt="What is 2+2? Answer with just the number.",
            system="You are a helpful assistant. Be concise."
        )
        assert '4' in response
    finally:
        # Release the client even when the skip or the assertion fires.
        await client.close()
@pytest.mark.asyncio
async def test_memory_simulation():
    """Test per-chat conversation memory handling in the bot."""
    from telegram_bot import TelegramBot

    # Mock components: a plain dict stands in for the daemon's memory store.
    memory = {}
    client = OllamaClient()
    bridge = HermesBridge(profile_path='/nonexistent.yaml')

    try:
        # Constructing the bot verifies its wiring; it is not exercised
        # further here.
        bot = TelegramBot(
            token="test-token",
            webhook_url="http://test/webhook",
            ollama_client=client,
            hermes_bridge=bridge,
            memory=memory
        )

        # Simulate one user/assistant exchange in a single chat.
        chat_id = "12345"
        history = memory.setdefault(chat_id, [])
        history.append({"role": "user", "content": "Hello"})
        history.append({"role": "assistant", "content": "Hi there!"})

        assert len(memory[chat_id]) == 2
        assert memory[chat_id][0]['role'] == 'user'
    finally:
        # Close the Ollama client even if bot construction or asserts fail.
        await client.close()
# ============================================================================
# Configuration Tests
# ============================================================================
def test_config_loading():
    """Test YAML config loading and required top-level sections.

    Skips explicitly when the config file is absent instead of silently
    passing, so a missing config is visible in the test report.
    """
    config_path = Path(__file__).parent.parent / "config" / "archon-kion.yaml"

    if not config_path.exists():
        pytest.skip(f"config not found: {config_path}")

    with open(config_path) as f:
        config = yaml.safe_load(f)

    for section in ('ollama', 'telegram', 'hermes'):
        assert section in config

    # Pin the default local model documented in the README.
    assert config['ollama']['model'] == 'gemma3:4b'
def test_profile_loading():
    """Test YAML Hermes profile loading and required sections.

    Skips explicitly when the profile file is absent instead of silently
    passing, so a missing profile is visible in the test report.
    """
    profile_path = Path(__file__).parent.parent / "hermes-profile" / "profile.yaml"

    if not profile_path.exists():
        pytest.skip(f"profile not found: {profile_path}")

    with open(profile_path) as f:
        profile = yaml.safe_load(f)

    for section in ('identity', 'constraints', 'routing'):
        assert section in profile

    # Production routing tag (the test fixture uses '#test-archon' instead).
    assert profile['routing']['tag'] == '#archon-kion'
# ============================================================================
# Main
# ============================================================================
# Allow running this test module directly (outside pytest discovery).
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
Reference in New Issue
Block a user