446 lines
15 KiB
Python
446 lines
15 KiB
Python
|
|
"""
|
||
|
|
REST API endpoints for Allegro-Primus Dashboard
|
||
|
|
Provides programmatic access to metrics, journal, issues, and knowledge stats.
|
||
|
|
"""
|
||
|
|
|
||
|
|
import os
|
||
|
|
import json
|
||
|
|
import logging
|
||
|
|
from datetime import datetime, timedelta
|
||
|
|
from pathlib import Path
|
||
|
|
from typing import Dict, List, Any, Optional
|
||
|
|
from dataclasses import dataclass, asdict
|
||
|
|
from flask import Flask, jsonify, request, Blueprint
|
||
|
|
|
||
|
|
logging.basicConfig(level=logging.INFO)
|
||
|
|
logger = logging.getLogger(__name__)
|
||
|
|
|
||
|
|
# Blueprint for API routes
|
||
|
|
api_bp = Blueprint('api', __name__, url_prefix='/api')
|
||
|
|
|
||
|
|
# Paths
|
||
|
|
BASE_DIR = Path("/root/wizards/allegro-primus")
|
||
|
|
JOURNAL_DIR = BASE_DIR / ".journal"
|
||
|
|
KNOWLEDGE_DIR = BASE_DIR / "knowledge"
|
||
|
|
METRICS_FILE = JOURNAL_DIR / "metrics.json"
|
||
|
|
ENTRIES_FILE = JOURNAL_DIR / "entries.jsonl"
|
||
|
|
|
||
|
|
|
||
|
|
@dataclass
class SystemStatus:
    """System status information.

    Converted with dataclasses.asdict() and returned as the JSON body
    of the /api/status endpoint.
    """
    status: str  # overall state string, e.g. "operational"
    timestamp: str  # ISO-8601 timestamp of this snapshot
    version: str  # application/API version string
    uptime_hours: float  # hours since process start (caller currently passes 0)
    components: Dict[str, str]  # component name -> "active"/"inactive"
|
||
|
|
|
||
|
|
|
||
|
|
class MetricsCollector:
    """Collect and aggregate metrics from various sources.

    Journal entries are loaded once, at construction time, from the
    ENTRIES_FILE JSON-Lines file; every getter then aggregates over
    that in-memory list.
    """

    def __init__(self):
        # In-memory cache of journal entries (one dict per JSONL line).
        self.journal_entries: List[Dict[str, Any]] = []
        self._load_journal()

    def _load_journal(self) -> None:
        """Load journal entries from file.

        A single corrupt line is logged and skipped instead of aborting
        the rest of the journal (previously one bad line discarded all
        subsequent entries).
        """
        if not ENTRIES_FILE.exists():
            return
        try:
            with open(ENTRIES_FILE, 'r') as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    try:
                        self.journal_entries.append(json.loads(line))
                    except json.JSONDecodeError as e:
                        # Keep loading the remaining lines.
                        logger.error(f"Error loading journal: {e}")
        except OSError as e:
            logger.error(f"Error loading journal: {e}")

    def get_current_metrics(self) -> Dict[str, Any]:
        """Get current system metrics.

        Prefers the pre-computed METRICS_FILE; falls back to counting
        the loaded journal entries when the file is missing or unreadable.
        """
        if METRICS_FILE.exists():
            try:
                with open(METRICS_FILE, 'r') as f:
                    return json.load(f)
            except (OSError, json.JSONDecodeError) as e:
                logger.error(f"Error loading metrics: {e}")

        # Calculate from entries if metrics file doesn't exist
        total = len(self.journal_entries)
        successful = sum(1 for e in self.journal_entries if e.get('success', False))

        return {
            "total_cycles": total,
            "successful_cycles": successful,
            "failed_cycles": total - successful,
            "success_rate": successful / total if total > 0 else 0,
            "last_updated": datetime.now().isoformat()
        }

    def get_performance_metrics(self) -> Dict[str, Any]:
        """Get detailed performance metrics (latency stats, success rate)."""
        entries = self.journal_entries

        if not entries:
            return {
                "avg_response_time_ms": 0,
                "min_response_time_ms": 0,
                "max_response_time_ms": 0,
                "total_tasks": 0,
                "success_rate": 0
            }

        # Entries with a missing or zero response time are excluded from
        # the latency aggregates (but still count toward total_tasks).
        response_times = [e.get('response_time_ms', 0) for e in entries if e.get('response_time_ms')]

        return {
            "avg_response_time_ms": sum(response_times) / len(response_times) if response_times else 0,
            "min_response_time_ms": min(response_times) if response_times else 0,
            "max_response_time_ms": max(response_times) if response_times else 0,
            "total_tasks": len(entries),
            "success_rate": sum(1 for e in entries if e.get('success', False)) / len(entries),
            "recent_tasks": entries[-10:]  # slicing already handles lists shorter than 10
        }

    def get_model_comparison(self) -> Dict[str, Any]:
        """Compare 1.5B vs 7B model performance.

        NOTE(review): the numbers below are simulated — real tracking of
        which model handled which task does not exist yet.
        """
        return {
            "models": {
                "qwen2.5-coder-1.5b": {
                    "name": "Qwen 2.5 Coder 1.5B",
                    "avg_response_time_ms": 450,
                    "success_rate": 0.94,
                    "tasks_completed": len(self.journal_entries) // 2,
                    "strengths": ["Fast inference", "Low resource usage", "Code completion"],
                    "memory_usage_mb": 2048
                },
                "qwen2.5-coder-7b": {
                    "name": "Qwen 2.5 Coder 7B",
                    "avg_response_time_ms": 850,
                    "success_rate": 0.97,
                    "tasks_completed": len(self.journal_entries) - (len(self.journal_entries) // 2),
                    "strengths": ["Complex reasoning", "Architecture design", "Debugging"],
                    "memory_usage_mb": 8192
                }
            },
            "recommendations": [
                "Use 1.5B for quick code completions and simple tasks",
                "Use 7B for complex architectural decisions and debugging",
                "1.5B is 47% faster on average with 3% lower success rate"
            ]
        }

    def get_time_series(self, days: int = 7) -> List[Dict[str, Any]]:
        """Get per-day success/failure/latency rows for the last `days` days."""
        entries = self.journal_entries
        if not entries:
            return []

        # Group by date
        from collections import defaultdict
        daily_stats = defaultdict(lambda: {"success": 0, "failed": 0, "total_time": 0, "count": 0})

        for entry in entries:
            try:
                ts = datetime.fromisoformat(entry.get('timestamp', ''))
                date_key = ts.strftime('%Y-%m-%d')

                if entry.get('success'):
                    daily_stats[date_key]['success'] += 1
                else:
                    daily_stats[date_key]['failed'] += 1

                daily_stats[date_key]['total_time'] += entry.get('response_time_ms', 0)
                daily_stats[date_key]['count'] += 1
            except (TypeError, ValueError):
                # Malformed timestamp or non-numeric response time: skip the
                # entry.  Was a bare `except:` that also swallowed
                # KeyboardInterrupt/SystemExit.
                continue

        # Convert to a date-sorted list of rows.
        result = []
        for date in sorted(daily_stats.keys()):
            stats = daily_stats[date]
            result.append({
                "date": date,
                "success": stats['success'],
                "failed": stats['failed'],
                "avg_response_time": stats['total_time'] / stats['count'] if stats['count'] > 0 else 0,
                "total_tasks": stats['success'] + stats['failed']
            })

        return result[-days:] if len(result) > days else result
|
||
|
|
|
||
|
|
|
||
|
|
class KnowledgeStats:
    """Knowledge base statistics.

    Reads node counts through the memdir module loaded dynamically from
    the knowledge directory; returns zeroed stats when unavailable.
    """

    def __init__(self):
        # Root directory of the knowledge base.
        self.knowledge_dir = KNOWLEDGE_DIR

    def get_stats(self) -> Dict[str, Any]:
        """Return knowledge-base statistics (zeroed defaults on failure)."""
        stats = {
            "total_nodes": 0,
            "total_edges": 0,
            "categories": {},
            "last_updated": None,
            "growth_rate": 0,
        }

        # Without the knowledge graph module there is nothing to count.
        graph_file = self.knowledge_dir / "knowledge_graph.py"
        if not graph_file.exists():
            return stats

        try:
            memdir_path = self.knowledge_dir / "memdir.py"
            if memdir_path.exists():
                # Load memdir.py dynamically from the knowledge directory.
                import importlib.util
                spec = importlib.util.spec_from_file_location("memdir", memdir_path)
                module = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(module)

                store = module.MemDir(str(self.knowledge_dir / "memory"))
                stats["total_nodes"] = len(store.list_all())
                stats["last_updated"] = datetime.now().isoformat()
        except Exception as e:
            logger.error(f"Error loading knowledge stats: {e}")

        return stats
|
||
|
|
|
||
|
|
|
||
|
|
class GiteaIssueTracker:
    """Track Gitea issues status.

    Issues are currently sourced from the local improvement queue file.
    NOTE(review): the previous version imported and instantiated
    GiteaClient but never used it, and a failure of that dead import
    discarded the queue-file issues too — the unused client (and the
    sys.path mutation it required) has been removed.
    """

    def __init__(self):
        # Directory containing improvement_queue.json.
        self.base_dir = BASE_DIR

    def get_issues(self, state: str = "all") -> List[Dict[str, Any]]:
        """Get Gitea issues.

        Args:
            state: issue state to filter by (e.g. 'open', 'closed'), or
                'all' (default) to return everything.  BUG FIX: this
                parameter was previously accepted but silently ignored.

        Returns:
            List of normalized issue dicts; [] on any read/parse error.
        """
        issues: List[Dict[str, Any]] = []
        try:
            # Try to get issues from improvement queue
            queue_file = self.base_dir / "improvement_queue.json"
            if queue_file.exists():
                with open(queue_file, 'r') as f:
                    queue = json.load(f)
                for item in queue:
                    issues.append({
                        "id": item.get('id', 0),
                        "title": item.get('title', 'Unknown'),
                        "state": item.get('status', 'open'),
                        "priority": item.get('priority', 'P2'),
                        "created_at": item.get('created_at', ''),
                        "type": "improvement"
                    })
        except Exception as e:
            logger.error(f"Error fetching Gitea issues: {e}")
            return []

        if state != "all":
            issues = [i for i in issues if i.get('state') == state]
        return issues

    def get_issue_stats(self) -> Dict[str, Any]:
        """Get issue statistics.

        Returns counts by state and priority, a resolution rate
        (closed / total, 0 when there are no issues), and the number of
        open P0 issues.
        """
        issues = self.get_issues()

        open_issues = [i for i in issues if i.get('state') == 'open']
        closed_issues = [i for i in issues if i.get('state') == 'closed']

        priority_counts: Dict[str, int] = {}
        for issue in issues:
            p = issue.get('priority', 'P2')
            priority_counts[p] = priority_counts.get(p, 0) + 1

        return {
            "total": len(issues),
            "open": len(open_issues),
            "closed": len(closed_issues),
            "resolution_rate": len(closed_issues) / len(issues) if issues else 0,
            "by_priority": priority_counts,
            "critical_issues": len([i for i in open_issues if i.get('priority') == 'P0'])
        }
|
||
|
|
|
||
|
|
|
||
|
|
# Initialize collectors
# NOTE: module-level singletons shared by all request handlers below.
# MetricsCollector() reads the journal file once at import time, so a
# restart is needed to pick up new entries.
metrics_collector = MetricsCollector()
knowledge_stats = KnowledgeStats()
issue_tracker = GiteaIssueTracker()
|
||
|
|
|
||
|
|
|
||
|
|
# API Routes
|
||
|
|
@api_bp.route('/status')
def get_status():
    """Report overall system status and per-component health."""
    component_health = {
        "journal": "active" if ENTRIES_FILE.exists() else "inactive",
        "knowledge_base": "active" if KNOWLEDGE_DIR.exists() else "inactive",
        "gitea": "active" if os.getenv("GITEA_URL") else "inactive",
        "scheduler": "active",
    }
    snapshot = SystemStatus(
        status="operational",
        timestamp=datetime.now().isoformat(),
        version="1.0.0",
        uptime_hours=0,  # Would be calculated from process start time
        components=component_health,
    )
    return jsonify(asdict(snapshot))
|
||
|
|
|
||
|
|
|
||
|
|
@api_bp.route('/metrics')
def get_metrics():
    """Return current, performance, model-comparison and time-series metrics."""
    payload = {
        "current": metrics_collector.get_current_metrics(),
        "performance": metrics_collector.get_performance_metrics(),
        "model_comparison": metrics_collector.get_model_comparison(),
        "time_series": metrics_collector.get_time_series(),
    }
    return jsonify(payload)
|
||
|
|
|
||
|
|
|
||
|
|
@api_bp.route('/metrics/history')
def get_metrics_history():
    """Return the daily metrics time series for the last `days` days (query param, default 7)."""
    days = request.args.get('days', 7, type=int)
    series = metrics_collector.get_time_series(days)
    return jsonify(series)
|
||
|
|
|
||
|
|
|
||
|
|
@api_bp.route('/journal')
def get_journal():
    """Return journal entries, newest first, with limit/offset pagination."""
    limit = request.args.get('limit', 50, type=int)
    offset = request.args.get('offset', 0, type=int)

    all_entries = metrics_collector.journal_entries

    # Newest first, then take the requested page.
    newest_first = sorted(all_entries, key=lambda e: e.get('timestamp', ''), reverse=True)
    page = newest_first[offset:offset + limit]

    return jsonify({
        "entries": page,
        "total": len(all_entries),
        "limit": limit,
        "offset": offset,
    })
|
||
|
|
|
||
|
|
|
||
|
|
@api_bp.route('/journal/summary')
def get_journal_summary():
    """Summarize the journal: task category counts, recent errors, lessons learned."""
    entries = metrics_collector.journal_entries

    # Keyword -> category rules, checked in priority order (first match wins);
    # anything unmatched falls through to 'other'.
    rules = [
        (('test',), 'testing'),
        (('implement', 'create'), 'implementation'),
        (('fix', 'debug'), 'bugfix'),
        (('analyze',), 'analysis'),
    ]

    task_types = {}
    for entry in entries:
        task = entry.get('task', '').lower()
        category = 'other'
        for keywords, name in rules:
            if any(kw in task for kw in keywords):
                category = name
                break
        task_types[category] = task_types.get(category, 0) + 1

    return jsonify({
        "total_entries": len(entries),
        "task_categories": task_types,
        "recent_errors": [e for e in entries if e.get('errors')][-5:],
        "lessons_learned": sum(len(e.get('lessons_learned', [])) for e in entries)
    })
|
||
|
|
|
||
|
|
|
||
|
|
@api_bp.route('/issues')
def get_issues():
    """List tracked issues; optional `state` query param (default 'all')."""
    requested_state = request.args.get('state', 'all')
    issues = issue_tracker.get_issues(requested_state)
    return jsonify(issues)
|
||
|
|
|
||
|
|
|
||
|
|
@api_bp.route('/issues/stats')
def get_issue_stats():
    """Return aggregate issue statistics (state counts, resolution rate, priorities)."""
    stats = issue_tracker.get_issue_stats()
    return jsonify(stats)
|
||
|
|
|
||
|
|
|
||
|
|
@api_bp.route('/knowledge')
def get_knowledge():
    """Return knowledge-base statistics."""
    stats = knowledge_stats.get_stats()
    return jsonify(stats)
|
||
|
|
|
||
|
|
|
||
|
|
@api_bp.route('/knowledge/growth')
def get_knowledge_growth():
    """Return knowledge-base growth over the past week.

    NOTE: placeholder/simulated data until real growth tracking exists.
    """
    daily_additions = []
    for days_ago in range(7, 0, -1):
        day = datetime.now() - timedelta(days=days_ago)
        daily_additions.append({
            "date": day.strftime('%Y-%m-%d'),
            "nodes_added": max(0, 5 - days_ago),
            "edges_added": max(0, 10 - days_ago * 2),
        })

    return jsonify({
        "daily_additions": daily_additions,
        "total_growth_rate": 1.5,
        "knowledge_depth": "intermediate"
    })
|
||
|
|
|
||
|
|
|
||
|
|
@api_bp.route('/health')
def health_check():
    """Health probe: reports existence of the key data files.

    NOTE(review): `status` is always "healthy" regardless of the checks —
    confirm whether consumers expect a degraded status when a check fails.
    """
    checks = {
        "journal": ENTRIES_FILE.exists(),
        "knowledge": KNOWLEDGE_DIR.exists(),
        "metrics": METRICS_FILE.exists(),
    }
    return jsonify({
        "status": "healthy",
        "timestamp": datetime.now().isoformat(),
        "checks": checks,
    })
|
||
|
|
|
||
|
|
|
||
|
|
def create_app():
    """Build the Flask application and mount the API blueprint."""
    app = Flask(__name__)
    app.register_blueprint(api_bp)

    # Advertised endpoint list for the /api index route.
    endpoints = [
        "/api/status",
        "/api/metrics",
        "/api/metrics/history",
        "/api/journal",
        "/api/journal/summary",
        "/api/issues",
        "/api/issues/stats",
        "/api/knowledge",
        "/api/knowledge/growth",
        "/api/health",
    ]

    @app.route('/api')
    def api_info():
        """API information."""
        return jsonify({
            "name": "Allegro-Primus Dashboard API",
            "version": "1.0.0",
            "endpoints": endpoints,
        })

    return app
|
||
|
|
|
||
|
|
|
||
|
|
if __name__ == '__main__':
    # Standalone development server entry point.
    # NOTE(review): debug=True with host 0.0.0.0 exposes the Werkzeug
    # interactive debugger to the whole network — confirm this script is
    # never launched this way in production.
    app = create_app()
    app.run(host='0.0.0.0', port=5001, debug=True)
|