#!/usr/bin/env python3
"""
Allegro-Primus Self-Improvement Journal System

Logs every work cycle, tracks metrics, generates summaries.
"""
import json
import os
import sys
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Optional, Any
from dataclasses import dataclass, asdict
from collections import defaultdict
import statistics

# On-disk layout: all journal state lives under the agent's .journal directory.
JOURNAL_DIR = Path("/root/wizards/allegro-primus/.journal")
JOURNAL_FILE = JOURNAL_DIR / "entries.jsonl"   # append-only JSON-lines log, one cycle per line
METRICS_FILE = JOURNAL_DIR / "metrics.json"    # cumulative counters across all cycles
SUMMARIES_DIR = JOURNAL_DIR / "summaries"      # generated daily/weekly summary files
|
@dataclass
class JournalEntry:
    """One logged work cycle, serialized as a single JSON line in entries.jsonl."""

    timestamp: str               # ISO-8601 local time when the cycle was logged
    cycle_id: str                # unique id: cycle_<YYYYmmdd_HHMMSS>_<hex>
    task: str                    # task description (truncated to 500 chars on write)
    result: str                  # outcome text (truncated to 2000 chars on write)
    success: bool                # whether the cycle succeeded
    response_time_ms: int        # elapsed time; 0 means "not measured"
    lessons_learned: List[str]   # free-form takeaways from the cycle
    errors: List[str]            # error messages encountered during the cycle
    tools_used: List[str]        # names of tools invoked during the cycle
    context: Dict[str, Any]      # arbitrary extra metadata supplied by the caller
|
@dataclass
class CycleMetrics:
    """Aggregated statistics over a window of journal entries."""

    total_cycles: int             # number of entries in the window
    successful_cycles: int        # entries with success == True
    failed_cycles: int            # entries with success == False
    avg_response_time_ms: float   # mean over entries with a positive time; 0 if none
    success_rate: float           # successful_cycles / total_cycles
    total_lessons: int            # total lessons_learned items across entries
    common_errors: List[tuple]    # up to 5 (error_prefix, count), most frequent first
    top_tools: List[tuple]        # up to 5 (tool_name, count), most used first
|
class Journal:
    """Append-only journal of work cycles with cumulative metrics and summaries."""

    def __init__(self):
        # Create the on-disk layout up front so later writes never fail on a
        # missing directory.
        JOURNAL_DIR.mkdir(parents=True, exist_ok=True)
        SUMMARIES_DIR.mkdir(parents=True, exist_ok=True)
        self._ensure_files()
|
def _ensure_files(self):
|
|
"""Ensure journal files exist."""
|
|
if not JOURNAL_FILE.exists():
|
|
JOURNAL_FILE.touch()
|
|
if not METRICS_FILE.exists():
|
|
self._save_metrics({
|
|
"total_cycles": 0,
|
|
"successful_cycles": 0,
|
|
"failed_cycles": 0,
|
|
"last_updated": datetime.now().isoformat()
|
|
})
|
|
|
|
def log_cycle(self,
|
|
task: str,
|
|
result: str,
|
|
success: bool = True,
|
|
response_time_ms: int = 0,
|
|
lessons_learned: List[str] = None,
|
|
errors: List[str] = None,
|
|
tools_used: List[str] = None,
|
|
context: Dict[str, Any] = None) -> str:
|
|
"""Log a work cycle."""
|
|
cycle_id = f"cycle_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{os.urandom(4).hex()}"
|
|
|
|
entry = JournalEntry(
|
|
timestamp=datetime.now().isoformat(),
|
|
cycle_id=cycle_id,
|
|
task=task[:500] if task else "",
|
|
result=result[:2000] if result else "",
|
|
success=success,
|
|
response_time_ms=response_time_ms,
|
|
lessons_learned=lessons_learned or [],
|
|
errors=errors or [],
|
|
tools_used=tools_used or [],
|
|
context=context or {}
|
|
)
|
|
|
|
# Append to journal
|
|
with open(JOURNAL_FILE, 'a') as f:
|
|
f.write(json.dumps(asdict(entry), default=str) + '\n')
|
|
|
|
# Update metrics
|
|
self._update_metrics(success)
|
|
|
|
return cycle_id
|
|
|
|
def _update_metrics(self, success: bool):
|
|
"""Update cumulative metrics."""
|
|
metrics = self._load_metrics()
|
|
metrics["total_cycles"] += 1
|
|
if success:
|
|
metrics["successful_cycles"] += 1
|
|
else:
|
|
metrics["failed_cycles"] += 1
|
|
metrics["success_rate"] = metrics["successful_cycles"] / metrics["total_cycles"]
|
|
metrics["last_updated"] = datetime.now().isoformat()
|
|
self._save_metrics(metrics)
|
|
|
|
def _load_metrics(self) -> Dict:
|
|
with open(METRICS_FILE, 'r') as f:
|
|
return json.load(f)
|
|
|
|
def _save_metrics(self, metrics: Dict):
|
|
with open(METRICS_FILE, 'w') as f:
|
|
json.dump(metrics, f, indent=2)
|
|
|
|
def get_entries(self, days: int = 7, success_only: bool = False) -> List[JournalEntry]:
|
|
"""Get journal entries from last N days."""
|
|
entries = []
|
|
cutoff = datetime.now() - timedelta(days=days)
|
|
|
|
if not JOURNAL_FILE.exists():
|
|
return entries
|
|
|
|
with open(JOURNAL_FILE, 'r') as f:
|
|
for line in f:
|
|
line = line.strip()
|
|
if not line:
|
|
continue
|
|
try:
|
|
data = json.loads(line)
|
|
entry_time = datetime.fromisoformat(data['timestamp'])
|
|
if entry_time < cutoff:
|
|
continue
|
|
if success_only and not data.get('success', True):
|
|
continue
|
|
entries.append(JournalEntry(**data))
|
|
except (json.JSONDecodeError, KeyError, ValueError):
|
|
continue
|
|
|
|
return entries
|
|
|
|
def calculate_metrics(self, days: int = 7) -> CycleMetrics:
|
|
"""Calculate metrics for specified period."""
|
|
entries = self.get_entries(days=days)
|
|
|
|
if not entries:
|
|
return CycleMetrics(0, 0, 0, 0.0, 0.0, 0, [], [])
|
|
|
|
response_times = [e.response_time_ms for e in entries if e.response_time_ms > 0]
|
|
success_count = sum(1 for e in entries if e.success)
|
|
|
|
# Count errors
|
|
error_counts = defaultdict(int)
|
|
for e in entries:
|
|
for err in e.errors:
|
|
error_counts[err[:100]] += 1
|
|
common_errors = sorted(error_counts.items(), key=lambda x: x[1], reverse=True)[:5]
|
|
|
|
# Count tools
|
|
tool_counts = defaultdict(int)
|
|
for e in entries:
|
|
for tool in e.tools_used:
|
|
tool_counts[tool] += 1
|
|
top_tools = sorted(tool_counts.items(), key=lambda x: x[1], reverse=True)[:5]
|
|
|
|
# Count lessons
|
|
total_lessons = sum(len(e.lessons_learned) for e in entries)
|
|
|
|
return CycleMetrics(
|
|
total_cycles=len(entries),
|
|
successful_cycles=success_count,
|
|
failed_cycles=len(entries) - success_count,
|
|
avg_response_time_ms=statistics.mean(response_times) if response_times else 0,
|
|
success_rate=success_count / len(entries),
|
|
total_lessons=total_lessons,
|
|
common_errors=common_errors,
|
|
top_tools=top_tools
|
|
)
|
|
|
|
def generate_daily_summary(self, date: Optional[datetime] = None) -> Dict:
|
|
"""Generate summary for a specific day."""
|
|
if date is None:
|
|
date = datetime.now()
|
|
|
|
start = date.replace(hour=0, minute=0, second=0, microsecond=0)
|
|
end = start + timedelta(days=1)
|
|
|
|
entries = self.get_entries(days=1)
|
|
day_entries = [e for e in entries if start <= datetime.fromisoformat(e.timestamp) < end]
|
|
|
|
if not day_entries:
|
|
return {"date": start.isoformat(), "status": "no_data"}
|
|
|
|
lessons = []
|
|
for e in day_entries:
|
|
lessons.extend(e.lessons_learned)
|
|
|
|
summary = {
|
|
"date": start.isoformat(),
|
|
"total_cycles": len(day_entries),
|
|
"successful": sum(1 for e in day_entries if e.success),
|
|
"failed": sum(1 for e in day_entries if not e.success),
|
|
"lessons_learned": list(set(lessons)),
|
|
"tasks_completed": [e.task[:100] for e in day_entries if e.success][:10]
|
|
}
|
|
|
|
# Save summary
|
|
summary_file = SUMMARIES_DIR / f"daily_{start.strftime('%Y%m%d')}.json"
|
|
with open(summary_file, 'w') as f:
|
|
json.dump(summary, f, indent=2)
|
|
|
|
return summary
|
|
|
|
def generate_weekly_summary(self) -> Dict:
|
|
"""Generate weekly summary."""
|
|
metrics = self.calculate_metrics(days=7)
|
|
entries = self.get_entries(days=7)
|
|
|
|
# Extract all lessons
|
|
all_lessons = []
|
|
for e in entries:
|
|
all_lessons.extend(e.lessons_learned)
|
|
|
|
# Identify patterns
|
|
task_types = defaultdict(int)
|
|
for e in entries:
|
|
task_type = self._categorize_task(e.task)
|
|
task_types[task_type] += 1
|
|
|
|
summary = {
|
|
"period": "last_7_days",
|
|
"generated_at": datetime.now().isoformat(),
|
|
"metrics": {
|
|
"total_cycles": metrics.total_cycles,
|
|
"success_rate": round(metrics.success_rate * 100, 2),
|
|
"avg_response_time_ms": round(metrics.avg_response_time_ms, 2),
|
|
"total_lessons": metrics.total_lessons
|
|
},
|
|
"common_errors": metrics.common_errors,
|
|
"top_tools": metrics.top_tools,
|
|
"task_distribution": dict(task_types),
|
|
"key_lessons": list(set(all_lessons))[:20]
|
|
}
|
|
|
|
# Save summary
|
|
week_num = datetime.now().isocalendar()[1]
|
|
summary_file = SUMMARIES_DIR / f"weekly_{datetime.now().year}_w{week_num}.json"
|
|
with open(summary_file, 'w') as f:
|
|
json.dump(summary, f, indent=2)
|
|
|
|
return summary
|
|
|
|
def _categorize_task(self, task: str) -> str:
|
|
"""Categorize a task by type."""
|
|
task_lower = task.lower()
|
|
if 'search' in task_lower or 'find' in task_lower:
|
|
return 'search'
|
|
elif 'code' in task_lower or 'write' in task_lower or 'create' in task_lower:
|
|
return 'code_generation'
|
|
elif 'read' in task_lower or 'analyze' in task_lower:
|
|
return 'analysis'
|
|
elif 'test' in task_lower or 'verify' in task_lower:
|
|
return 'testing'
|
|
elif 'debug' in task_lower or 'fix' in task_lower:
|
|
return 'debugging'
|
|
else:
|
|
return 'other'
|
|
|
|
def get_patterns(self, days: int = 30) -> Dict:
|
|
"""Identify patterns in journal entries."""
|
|
entries = self.get_entries(days=days)
|
|
|
|
if len(entries) < 5:
|
|
return {"status": "insufficient_data", "message": "Need at least 5 entries for pattern analysis"}
|
|
|
|
# Time-based patterns
|
|
hour_success = defaultdict(lambda: {'success': 0, 'total': 0})
|
|
for e in entries:
|
|
hour = datetime.fromisoformat(e.timestamp).hour
|
|
hour_success[hour]['total'] += 1
|
|
if e.success:
|
|
hour_success[hour]['success'] += 1
|
|
|
|
# Find best performing hours
|
|
hour_rates = {
|
|
h: (v['success'] / v['total']) for h, v in hour_success.items() if v['total'] >= 3
|
|
}
|
|
best_hours = sorted(hour_rates.items(), key=lambda x: x[1], reverse=True)[:3]
|
|
|
|
# Task type success rates
|
|
task_success = defaultdict(lambda: {'success': 0, 'total': 0})
|
|
for e in entries:
|
|
task_type = self._categorize_task(e.task)
|
|
task_success[task_type]['total'] += 1
|
|
if e.success:
|
|
task_success[task_type]['success'] += 1
|
|
|
|
task_rates = {
|
|
t: (v['success'] / v['total']) for t, v in task_success.items() if v['total'] >= 3
|
|
}
|
|
|
|
return {
|
|
"analysis_period_days": days,
|
|
"total_entries": len(entries),
|
|
"best_performing_hours": best_hours,
|
|
"task_success_rates": task_rates,
|
|
"improvement_trend": self._calculate_trend(entries)
|
|
}
|
|
|
|
def _calculate_trend(self, entries: List[JournalEntry]) -> str:
|
|
"""Calculate improvement trend."""
|
|
if len(entries) < 10:
|
|
return "insufficient_data"
|
|
|
|
# Split into first and second half
|
|
mid = len(entries) // 2
|
|
first_half = entries[:mid]
|
|
second_half = entries[mid:]
|
|
|
|
first_rate = sum(1 for e in first_half if e.success) / len(first_half)
|
|
second_rate = sum(1 for e in second_half if e.success) / len(second_half)
|
|
|
|
if second_rate > first_rate + 0.1:
|
|
return "improving"
|
|
elif second_rate < first_rate - 0.1:
|
|
return "declining"
|
|
else:
|
|
return "stable"
|
|
|
|
def display_summary(self, days: int = 7):
|
|
"""Display a formatted summary."""
|
|
metrics = self.calculate_metrics(days=days)
|
|
|
|
print("\n" + "=" * 60)
|
|
print(f" ALLEGRO-PRIMUS JOURNAL SUMMARY (Last {days} days)")
|
|
print("=" * 60)
|
|
print(f" Total Cycles: {metrics.total_cycles}")
|
|
print(f" Success Rate: {metrics.success_rate*100:.1f}%")
|
|
print(f" Avg Response Time: {metrics.avg_response_time_ms:.0f}ms")
|
|
print(f" Total Lessons: {metrics.total_lessons}")
|
|
print("-" * 60)
|
|
|
|
if metrics.common_errors:
|
|
print(" Common Errors:")
|
|
for err, count in metrics.common_errors[:3]:
|
|
print(f" - {err[:50]}... ({count}x)")
|
|
|
|
if metrics.top_tools:
|
|
print(" Top Tools Used:")
|
|
for tool, count in metrics.top_tools[:3]:
|
|
print(f" - {tool}: {count}x")
|
|
|
|
print("=" * 60)
|
|
|
|
|
|
def main():
    """CLI entry point: log cycles and print summaries, patterns, or stats."""
    import argparse

    parser = argparse.ArgumentParser(description="AP Self-Improvement Journal")
    parser.add_argument('action', choices=['log', 'summary', 'daily', 'weekly', 'patterns', 'stats'])
    parser.add_argument('--task', '-t', help='Task description for log')
    parser.add_argument('--result', '-r', help='Result for log', default='')
    parser.add_argument('--success', '-s', type=lambda x: x.lower() == 'true', default=True)
    parser.add_argument('--time', type=int, default=0, help='Response time in ms')
    parser.add_argument('--days', '-d', type=int, default=7, help='Days for summary')
    args = parser.parse_args()

    journal = Journal()
    action = args.action

    if action == 'log':
        # A task description is mandatory when logging a cycle.
        if not args.task:
            print("Error: --task required for logging")
            sys.exit(1)
        cycle_id = journal.log_cycle(
            task=args.task,
            result=args.result,
            success=args.success,
            response_time_ms=args.time,
        )
        print(f"Logged cycle: {cycle_id}")
    elif action == 'summary':
        journal.display_summary(days=args.days)
    elif action == 'daily':
        print(json.dumps(journal.generate_daily_summary(), indent=2))
    elif action == 'weekly':
        print(json.dumps(journal.generate_weekly_summary(), indent=2))
    elif action == 'patterns':
        print(json.dumps(journal.get_patterns(days=args.days), indent=2))
    elif action == 'stats':
        metrics = journal.calculate_metrics(days=args.days)
        print(json.dumps({
            'total_cycles': metrics.total_cycles,
            'success_rate': metrics.success_rate,
            'avg_response_time_ms': metrics.avg_response_time_ms,
            'total_lessons': metrics.total_lessons,
        }, indent=2))
|
|
if __name__ == '__main__':
    # Script entry point for direct CLI invocation.
    main()