Compare commits

..

1 Commits

5 changed files with 276 additions and 961 deletions

View File

@@ -1,554 +0,0 @@
#!/usr/bin/env python3
"""
Automation Opportunity Finder — Scan fleet for manual processes that could be automated.
Analyzes:
1. Cron jobs — finds manual steps between scheduled tasks
2. Documentation — extracts TODO/FIXME/manual-step patterns
3. Scripts — detects repeated command sequences
4. Session transcripts — finds repeated tool-call patterns
Usage:
python3 scripts/automation_opportunity_finder.py --hermes-home ~/.hermes
python3 scripts/automation_opportunity_finder.py --hermes-home ~/.hermes --json
python3 scripts/automation_opportunity_finder.py --hermes-home ~/.hermes --output proposals.json
"""
import argparse
import json
import os
import re
import sys
from collections import Counter, defaultdict
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional
# ---------------------------------------------------------------------------
# Patterns that signal manual work
# ---------------------------------------------------------------------------
MANUAL_STEP_PATTERNS = [
# Explicit manual markers
(r"(?i)\bTODO[:\s]", "todo"),
(r"(?i)\bFIXME[:\s]", "fixme"),
(r"(?i)\bMANUAL[:\s]", "manual_step"),
(r"(?i)\bHACK[:\s]", "hack"),
(r"(?i)\bWORKAROUND[:\s]", "workaround"),
# Step-by-step instructions in docs
(r"(?i)^(\d+)[.\)]\s+(run|execute|ssh|scp|curl|cd|make|docker|ansible|git)", "sequential_step"),
# Explicitly manual operations
(r"(?i)\bmanually\b", "manual_keyword"),
(r"(?i)\bby hand\b", "manual_keyword"),
(r"(?i)\bdon\'?t forget to\b", "manual_keyword"),
(r"(?i)\bremember to\b", "manual_keyword"),
(r"(?i)\bmake sure to\b", "manual_keyword"),
]
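# Hypothetical doc lines, for illustration only, that the patterns above would flag:
#   "TODO: rotate the API key"               -> todo
#   "3. Run ansible-playbook site.yml"       -> sequential_step
#   "Remember to restart nginx afterwards"   -> manual_keyword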
# Shell commands that appear frequently in runbooks — signal automatable workflows
SHELL_COMMAND_PATTERNS = [
"ssh ", "scp ", "rsync ", "curl ", "wget ",
"docker ", "docker-compose ", "kubectl ",
"ansible-playbook ", "terraform ", "systemctl ",
"systemctl restart", "systemctl status",
"git push", "git pull", "git merge", "git checkout",
"pip install", "npm install", "cargo build",
]
# Directories to skip during scans — large/uninteresting trees
EXCLUDE_DIRS = frozenset({
"node_modules", "venv", ".venv", "__pycache__", ".git",
"site-packages", "dist", "build", ".tox", ".mypy_cache",
".pytest_cache", "coverage", ".next", "vendor",
"skills", # hermes skills dir is huge
"audio_cache", "skins", "profiles",
})
# Session tool calls that appear repeatedly — candidates for workflow automation
TOOL_SEQUENCE_MIN_OCCURRENCES = 3
# ---------------------------------------------------------------------------
# Analyzers
# ---------------------------------------------------------------------------
def analyze_cron_jobs(hermes_home: str) -> List[Dict[str, Any]]:
"""Analyze cron job definitions for automation gaps."""
proposals = []
cron_dir = Path(hermes_home) / "cron"
jobs_file = cron_dir / "jobs.json"
if not jobs_file.exists():
# Try YAML format
for ext in (".yaml", ".yml"):
alt = cron_dir / f"jobs{ext}"
if alt.exists():
jobs_file = alt
break
if not jobs_file.exists():
return proposals
try:
if jobs_file.suffix == ".json":
with open(jobs_file) as f:
jobs = json.load(f)
else:
try:
import yaml
with open(jobs_file) as f:
jobs = yaml.safe_load(f)
except ImportError:
return proposals
    except Exception:  # unreadable or malformed jobs file
return proposals
if not isinstance(jobs, list):
return proposals
# Look for disabled jobs (someone turned them off — might need a different approach)
disabled = [j for j in jobs if not j.get("enabled", True)]
if disabled:
names = [j.get("name", j.get("id", "?")) for j in disabled[:5]]
proposals.append({
"category": "cron_disabled",
"title": f"{len(disabled)} disabled cron job(s) may need automation rework",
"description": f"These jobs were disabled: {', '.join(names)}. Investigate why and whether a different automation approach is needed.",
"confidence": 0.7,
"impact": "medium",
"sources": [str(jobs_file)],
})
# Look for jobs with high error counts
error_jobs = [j for j in jobs if j.get("last_status") == "error"]
if error_jobs:
names = [j.get("name", j.get("id", "?")) for j in error_jobs[:5]]
proposals.append({
"category": "cron_errors",
"title": f"{len(error_jobs)} cron job(s) failing — may need automation rework",
"description": f"Jobs with errors: {', '.join(names)}. Failure patterns suggest missing dependencies or fragile automation.",
"confidence": 0.8,
"impact": "high",
"sources": [str(jobs_file)],
})
# Look for jobs with delivery errors (platform issues)
delivery_errors = [j for j in jobs if j.get("last_delivery_error")]
if delivery_errors:
proposals.append({
"category": "cron_delivery",
"title": f"{len(delivery_errors)} cron job(s) have delivery failures",
"description": "Delivery failures suggest missing retry logic or platform integration gaps.",
"confidence": 0.75,
"impact": "medium",
"sources": [str(jobs_file)],
})
# Look for jobs on short intervals that could be event-driven
for job in jobs:
schedule = job.get("schedule", "")
# Check for very frequent schedules (every minute, every 5 min)
if isinstance(schedule, str) and re.match(r"^\*\/([1-5])\s", schedule):
proposals.append({
"category": "cron_frequency",
"title": f"Job '{job.get('name', job.get('id', '?'))}' runs every {schedule.split()[0]} — consider event-driven",
"description": f"High-frequency cron ({schedule}) may be better as event-driven or daemon.",
"confidence": 0.6,
"impact": "low",
"sources": [str(jobs_file)],
})
return proposals
def analyze_documents(root_dirs: List[str]) -> List[Dict[str, Any]]:
"""Scan documentation for manual step patterns."""
proposals = []
doc_extensions = {".md", ".txt", ".rst", ".adoc"}
findings_by_category = defaultdict(list)
for root_dir in root_dirs:
root = Path(root_dir)
if not root.exists():
continue
for path in root.rglob("*"):
if path.is_dir():
continue
if path.suffix not in doc_extensions:
continue
# Skip excluded dirs and hidden dirs
parts = path.relative_to(root).parts if root in path.parents or root == path.parent else path.parts
if any(p.startswith(".") or p in EXCLUDE_DIRS for p in parts):
continue
if len(parts) > 8:
continue
try:
content = path.read_text(errors="replace")
except (PermissionError, OSError):
continue
lines = content.split("\n")
for i, line in enumerate(lines):
stripped = line.strip()
if not stripped:
continue
for pattern, category in MANUAL_STEP_PATTERNS:
if re.search(pattern, stripped):
findings_by_category[category].append({
"file": str(path),
"line": i + 1,
"text": stripped[:200],
})
# Generate proposals from findings
for category, findings in findings_by_category.items():
if len(findings) < 2:
continue
file_count = len(set(f["file"] for f in findings))
proposals.append({
"category": f"manual_{category}",
"title": f"{len(findings)} '{category}' markers across {file_count} doc(s)",
"description": f"Found in: {', '.join(set(Path(f['file']).name for f in findings[:5]))}",
"confidence": 0.65,
"impact": "medium",
"sources": list(set(f["file"] for f in findings[:10])),
"details": findings[:5], # sample
})
return proposals
def analyze_scripts(root_dirs: List[str]) -> List[Dict[str, Any]]:
"""Detect repeated command sequences in scripts."""
proposals = []
script_extensions = {".py", ".sh", ".bash", ".zsh"}
command_counter = Counter()
command_locations = defaultdict(list)
for root_dir in root_dirs:
root = Path(root_dir)
if not root.exists():
continue
for path in root.rglob("*"):
if path.is_dir():
continue
if path.suffix not in script_extensions:
continue
parts = path.relative_to(root).parts if root in path.parents or root == path.parent else path.parts
if any(p.startswith(".") or p in EXCLUDE_DIRS for p in parts):
continue
if len(parts) > 8:
continue
try:
content = path.read_text(errors="replace")
except (PermissionError, OSError):
continue
lines = content.split("\n")
for i, line in enumerate(lines):
stripped = line.strip()
if not stripped or stripped.startswith("#"):
continue
                for cmd_prefix in SHELL_COMMAND_PATTERNS:
                    if cmd_prefix in stripped:
                        # Normalize whitespace and truncate so near-identical invocations collapse
                        normalized = re.sub(r"\s+", " ", stripped)[:120]
                        command_counter[normalized] += 1
                        command_locations[normalized].append(f"{path}:{i+1}")
                        break  # count each line once even if it matches several prefixes (e.g. "systemctl " and "systemctl restart")
# Proposals for commands appearing 3+ times
for cmd, count in command_counter.most_common(20):
if count < 3:
break
locs = command_locations[cmd]
file_count = len(set(loc.split(":")[0] for loc in locs))
proposals.append({
"category": "repeated_command",
"title": f"Command repeated {count}x across {file_count} file(s): {cmd[:80]}",
"description": f"Locations: {', '.join(locs[:3])}",
"confidence": min(0.5 + (count * 0.1), 0.95),
"impact": "medium",
"sources": list(set(loc.split(":")[0] for loc in locs)),
})
return proposals
def analyze_session_transcripts(session_dirs: List[str]) -> List[Dict[str, Any]]:
"""Find repeated tool-call patterns in session transcripts."""
proposals = []
tool_sequence_counter = Counter()
tool_sequence_examples = {}
for session_dir in session_dirs:
session_path = Path(session_dir)
if not session_path.exists():
continue
for path in session_path.rglob("*.jsonl"):
try:
content = path.read_text(errors="replace")
except (PermissionError, OSError):
continue
# Extract tool calls in sequence
tool_sequence = []
for line in content.split("\n"):
line = line.strip()
if not line:
continue
try:
msg = json.loads(line)
except json.JSONDecodeError:
continue
# Look for tool calls in assistant messages
if msg.get("role") == "assistant" and msg.get("tool_calls"):
for tc in msg["tool_calls"]:
func_name = tc.get("function", {}).get("name", "?")
tool_sequence.append(func_name)
# Find 2-call sequences
for i in range(len(tool_sequence) - 1):
seq = (tool_sequence[i], tool_sequence[i + 1])
tool_sequence_counter[seq] += 1
if seq not in tool_sequence_examples:
tool_sequence_examples[seq] = str(path.name)
# Find 3-call sequences
for i in range(len(tool_sequence) - 2):
seq = (tool_sequence[i], tool_sequence[i + 1], tool_sequence[i + 2])
tool_sequence_counter[seq] += 1
if seq not in tool_sequence_examples:
tool_sequence_examples[seq] = str(path.name)
# Generate proposals for frequently repeated sequences
for seq, count in tool_sequence_counter.most_common(20):
if count < TOOL_SEQUENCE_MIN_OCCURRENCES:
break
seq_str = " -> ".join(seq)
proposals.append({
"category": "tool_sequence",
"title": f"Tool sequence '{seq_str}' repeated {count} times",
"description": f"Consider creating a workflow/skill that automates this sequence.",
"confidence": min(0.5 + (count * 0.05), 0.9),
"impact": "medium",
"sources": [tool_sequence_examples.get(seq, "unknown")],
})
return proposals
def analyze_shell_history(root_dirs: List[str]) -> List[Dict[str, Any]]:
"""Find repeated shell commands from history files."""
proposals = []
command_counter = Counter()
for root_dir in root_dirs:
root = Path(root_dir)
history_files = []
# Look for shell history files
for name in (".bash_history", ".zsh_history", ".python_history"):
p = root / name
if p.exists():
history_files.append(p)
# Also check in hermes home
for p in root.glob("**/*history*"):
            if p.is_file() and p.suffix in ("", ".txt", ".log") and p not in history_files:
history_files.append(p)
for hf in history_files:
try:
content = hf.read_text(errors="replace")
except (PermissionError, OSError):
continue
for line in content.split("\n"):
stripped = line.strip()
# ZSH history format: ": 1234567890:0;command"
stripped = re.sub(r"^:\s*\d+:\d+;", "", stripped)
if not stripped or len(stripped) < 5:
continue
# Skip trivial commands
if stripped in ("ls", "cd", "pwd", "clear", "exit"):
continue
command_counter[stripped] += 1
for cmd, count in command_counter.most_common(10):
if count < 5:
break
proposals.append({
"category": "shell_repetition",
"title": f"Shell command run {count}+ times: {cmd[:80]}",
"description": "Frequently repeated shell command — candidate for alias, function, or script.",
"confidence": min(0.4 + (count * 0.05), 0.85),
"impact": "low",
"sources": ["shell_history"],
})
return proposals
# ---------------------------------------------------------------------------
# Proposal output
# ---------------------------------------------------------------------------
def deduplicate_proposals(proposals: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Remove duplicate proposals based on title similarity."""
seen_titles = set()
unique = []
for p in proposals:
# Normalize title for dedup
key = re.sub(r"\d+", "N", p["title"]).lower()
if key not in seen_titles:
seen_titles.add(key)
unique.append(p)
return unique
def rank_proposals(proposals: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Sort proposals by impact * confidence (highest first)."""
impact_weight = {"critical": 1.0, "high": 0.8, "medium": 0.5, "low": 0.2}
return sorted(
proposals,
key=lambda p: impact_weight.get(p.get("impact", "low"), 0.2) * p.get("confidence", 0.5),
reverse=True,
)
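# Worked example of the ranking score (impact weight x confidence):
#   impact "high"   (0.8) with confidence 0.8 -> 0.64
#   impact "medium" (0.5) with confidence 0.7 -> 0.35
#   impact "low"    (0.2) with confidence 0.9 -> 0.18
# so a high-impact proposal outranks a more-confident low-impact one.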
def format_text_report(proposals: List[Dict[str, Any]]) -> str:
"""Format proposals as human-readable text."""
if not proposals:
return "No automation opportunities found."
lines = [
"=" * 70,
" AUTOMATION OPPORTUNITY REPORT",
f" Generated: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M UTC')}",
f" Proposals: {len(proposals)}",
"=" * 70,
"",
]
for i, p in enumerate(proposals, 1):
score = p.get("confidence", 0.5) * {"critical": 1.0, "high": 0.8, "medium": 0.5, "low": 0.2}.get(p.get("impact", "low"), 0.2)
lines.append(f"[{i}] {p['title']}")
lines.append(f" Category: {p['category']} | Impact: {p.get('impact','?')} | Confidence: {p.get('confidence',0):.0%} | Score: {score:.2f}")
lines.append(f" {p['description']}")
if p.get("sources"):
lines.append(f" Sources: {', '.join(p['sources'][:3])}")
lines.append("")
# Summary by category
cat_counts = Counter(p["category"] for p in proposals)
lines.append("-" * 70)
lines.append("Summary by category:")
for cat, count in cat_counts.most_common():
lines.append(f" {cat}: {count}")
return "\n".join(lines)
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="Find automation opportunities across the fleet")
parser.add_argument("--hermes-home", default=os.path.expanduser("~/.hermes"),
help="Path to hermes home directory (default: ~/.hermes)")
parser.add_argument("--scan-dirs", nargs="*",
help="Additional directories to scan (default: hermes-home + cwd)")
parser.add_argument("--session-dirs", nargs="*",
help="Session transcript directories (default: hermes-home/sessions)")
parser.add_argument("--json", action="store_true", help="Output as JSON")
parser.add_argument("--output", "-o", help="Write proposals to file")
parser.add_argument("--min-confidence", type=float, default=0.3,
help="Minimum confidence threshold (default: 0.3)")
parser.add_argument("--categories", nargs="*",
help="Only include these categories (cron, docs, scripts, sessions, shell)")
args = parser.parse_args()
hermes_home = os.path.expanduser(args.hermes_home)
# Default scan directories
scan_dirs = [hermes_home, "."]
if args.scan_dirs:
scan_dirs.extend(args.scan_dirs)
session_dirs = [os.path.join(hermes_home, "sessions")]
if args.session_dirs:
session_dirs.extend(args.session_dirs)
# Also check common session locations
for subdir in ("transcripts", "session-db"):
p = os.path.join(hermes_home, subdir)
if os.path.isdir(p):
session_dirs.append(p)
categories = set(args.categories) if args.categories else {"cron", "docs", "scripts", "sessions", "shell"}
# Run analyzers
all_proposals = []
if "cron" in categories:
all_proposals.extend(analyze_cron_jobs(hermes_home))
if "docs" in categories:
all_proposals.extend(analyze_documents(scan_dirs))
if "scripts" in categories:
all_proposals.extend(analyze_scripts(scan_dirs))
if "sessions" in categories:
all_proposals.extend(analyze_session_transcripts(session_dirs))
if "shell" in categories:
all_proposals.extend(analyze_shell_history(scan_dirs))
# Deduplicate and rank
all_proposals = deduplicate_proposals(all_proposals)
all_proposals = rank_proposals(all_proposals)
# Filter by confidence
all_proposals = [p for p in all_proposals if p.get("confidence", 0) >= args.min_confidence]
# Output
output = {
"generated_at": datetime.now(timezone.utc).isoformat(),
"hermes_home": hermes_home,
"scan_dirs": scan_dirs,
"total_proposals": len(all_proposals),
"proposals": all_proposals,
}
if args.json:
result = json.dumps(output, indent=2)
else:
result = format_text_report(all_proposals)
if args.output:
with open(args.output, "w") as f:
if args.json:
json.dump(output, f, indent=2)
else:
f.write(result)
print(f"Written to {args.output}", file=sys.stderr)
else:
print(result)
return 0 if all_proposals else 1
if __name__ == "__main__":
sys.exit(main())
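
For reference, every analyzer in this script emits proposals with the same dict shape, so the text report and JSON output share one schema. A minimal sketch with illustrative values (not real output):

    example_proposal = {
        "category": "repeated_command",   # analyzer-specific label
        "title": "Command repeated 4x across 3 file(s): docker restart myapp",
        "description": "Locations: scripts/deploy0.sh:2, scripts/deploy1.sh:2, scripts/deploy2.sh:2",
        "confidence": 0.9,                # 0.0-1.0, capped per analyzer
        "impact": "medium",               # one of critical / high / medium / low
        "sources": ["scripts/deploy0.sh", "scripts/deploy1.sh"],
        # analyze_documents() also attaches an optional "details" sample
    }

deduplicate_proposals() then collapses titles that differ only in their numbers, and rank_proposals() orders the survivors by impact weight times confidence.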

View File

@@ -1,131 +0,0 @@
#!/usr/bin/env python3
"""
Knowledge Store Staleness Detector — Detect stale knowledge entries by comparing source file hashes.
Usage:
python3 scripts/knowledge_staleness_check.py --index knowledge/index.json
python3 scripts/knowledge_staleness_check.py --index knowledge/index.json --json
python3 scripts/knowledge_staleness_check.py --index knowledge/index.json --fix
"""
import argparse
import hashlib
import json
import os
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Any, Optional
def compute_file_hash(filepath: str) -> Optional[str]:
"""Compute SHA-256 hash of a file. Returns None if file doesn't exist."""
try:
with open(filepath, "rb") as f:
return "sha256:" + hashlib.sha256(f.read()).hexdigest()
except (FileNotFoundError, IsADirectoryError, PermissionError):
return None
def check_staleness(index_path: str, repo_root: str = ".") -> List[Dict[str, Any]]:
"""Check all entries in knowledge index for staleness.
Returns list of entries with staleness info:
- status: "fresh" | "stale" | "missing_source" | "no_hash"
- current_hash: computed hash (if source exists)
- stored_hash: hash from index
"""
with open(index_path) as f:
data = json.load(f)
facts = data.get("facts", [])
results = []
for entry in facts:
source_file = entry.get("source_file")
stored_hash = entry.get("source_hash")
if not source_file:
results.append({**entry, "status": "no_source", "current_hash": None})
continue
full_path = os.path.join(repo_root, source_file)
current_hash = compute_file_hash(full_path)
if current_hash is None:
results.append({**entry, "status": "missing_source", "current_hash": None})
elif not stored_hash:
results.append({**entry, "status": "no_hash", "current_hash": current_hash})
elif current_hash != stored_hash:
results.append({**entry, "status": "stale", "current_hash": current_hash})
else:
results.append({**entry, "status": "fresh", "current_hash": current_hash})
return results
def fix_hashes(index_path: str, repo_root: str = ".") -> int:
"""Add hashes to entries missing them. Returns count of fixed entries."""
with open(index_path) as f:
data = json.load(f)
fixed = 0
for entry in data.get("facts", []):
if entry.get("source_hash"):
continue
source_file = entry.get("source_file")
if not source_file:
continue
full_path = os.path.join(repo_root, source_file)
h = compute_file_hash(full_path)
if h:
entry["source_hash"] = h
fixed += 1
with open(index_path, "w") as f:
json.dump(data, f, indent=2)
return fixed
def main():
parser = argparse.ArgumentParser(description="Check knowledge store staleness")
parser.add_argument("--index", required=True, help="Path to knowledge/index.json")
parser.add_argument("--repo", default=".", help="Repo root for source file resolution")
parser.add_argument("--json", action="store_true", help="Output as JSON")
parser.add_argument("--fix", action="store_true", help="Add hashes to entries missing them")
args = parser.parse_args()
if args.fix:
fixed = fix_hashes(args.index, args.repo)
print(f"Fixed {fixed} entries with missing hashes.")
return
results = check_staleness(args.index, args.repo)
if args.json:
print(json.dumps(results, indent=2))
else:
stale = [r for r in results if r["status"] != "fresh"]
fresh = [r for r in results if r["status"] == "fresh"]
print(f"Knowledge Store Staleness Check")
print(f" Total entries: {len(results)}")
print(f" Fresh: {len(fresh)}")
print(f" Stale/Issues: {len(stale)}")
print()
if stale:
print("Issues found:")
for r in stale:
status = r["status"]
fact = r.get("fact", "?")[:60]
source = r.get("source_file", "?")
print(f" [{status}] {source}: {fact}")
else:
print("All entries are fresh!")
if __name__ == "__main__":
main()
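
The index format the checker expects can be seen in the tests below; a minimal sketch with a hypothetical fact and path:

    index = {
        "facts": [
            {
                "fact": "deploy.sh restarts the API service",        # the knowledge entry itself
                "source_file": "scripts/deploy.sh",                  # resolved relative to --repo
                "source_hash": "sha256:<hex digest of the source file>",
            }
        ]
    }

check_staleness() labels each entry fresh, stale, missing_source, no_hash, or no_source, and --fix (fix_hashes) fills in source_hash for entries that lack one.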

scripts/session_metadata.py (new file, 276 additions)
View File

@@ -0,0 +1,276 @@
#!/usr/bin/env python3
"""
session_metadata.py - Extract structured metadata from Hermes session transcripts.
Works alongside session_reader.py to provide higher-level session analysis.
"""
import json
import re
import sys
from dataclasses import dataclass, asdict
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Any
# Import from session_reader (the canonical reader)
from session_reader import read_session
@dataclass
class SessionSummary:
"""Structured summary of a Hermes session transcript."""
session_id: str
model: str
repo: str
outcome: str
message_count: int
tool_calls: int
duration_estimate: str
key_actions: List[str]
errors_encountered: List[str]
start_time: Optional[str] = None
end_time: Optional[str] = None
total_tokens_estimate: int = 0
user_messages: int = 0
assistant_messages: int = 0
tool_outputs: int = 0
def extract_session_metadata(file_path: str) -> SessionSummary:
"""
Extract structured metadata from a Hermes session JSONL transcript.
Uses session_reader.read_session() for file reading.
"""
session_id = Path(file_path).stem
messages = []
model = "unknown"
repo = "unknown"
tool_calls_count = 0
key_actions = []
errors = []
start_time = None
end_time = None
total_tokens = 0
# Common repo patterns to look for
repo_patterns = [
r"(?:the-nexus|compounding-intelligence|timmy-config|hermes-agent)",
r"(?:forge\.alexanderwhitestone\.com/([^/]+/[^/\\s]+))",
r"(?:github\.com/([^/]+/[^/\\s]+))",
r"(?:Timmy_Foundation/([^/\\s]+))",
]
try:
# Use the canonical reader from session_reader.py
messages = read_session(file_path)
except FileNotFoundError:
return SessionSummary(
session_id=session_id,
model="unknown",
repo="unknown",
outcome="failure",
message_count=0,
tool_calls=0,
duration_estimate="0m",
key_actions=[],
errors_encountered=[f"File not found: {file_path}"]
)
# Process messages for metadata
for entry in messages:
# Extract model from assistant messages
if entry.get("role") == "assistant" and entry.get("model"):
model = entry["model"]
# Extract timestamps
if entry.get("timestamp"):
ts = entry["timestamp"]
if start_time is None:
start_time = ts
end_time = ts
# Count tool calls
if entry.get("tool_calls"):
tool_calls_count += len(entry["tool_calls"])
for tc in entry["tool_calls"]:
if tc.get("function", {}).get("name"):
action = f"{tc['function']['name']}"
if action not in key_actions:
key_actions.append(action)
        # Rough token estimate: count whitespace-separated words in the content
content = entry.get("content", "")
if isinstance(content, str):
total_tokens += len(content.split())
elif isinstance(content, list):
for item in content:
if isinstance(item, dict) and "text" in item:
total_tokens += len(item["text"].split())
# Look for repo mentions in content
if entry.get("content"):
content_str = str(entry["content"])
for pattern in repo_patterns:
match = re.search(pattern, content_str, re.IGNORECASE)
if match:
if match.groups():
repo = match.group(1)
else:
repo = match.group(0)
break
# Look for error messages
if entry.get("role") == "tool" and entry.get("is_error"):
error_msg = entry.get("content", "Unknown error")
if isinstance(error_msg, str) and len(error_msg) < 200:
errors.append(error_msg[:200])
# Count message types
user_messages = sum(1 for m in messages if m.get("role") == "user")
assistant_messages = sum(1 for m in messages if m.get("role") == "assistant")
tool_outputs = sum(1 for m in messages if m.get("role") == "tool")
# Calculate duration estimate
duration_estimate = "unknown"
if start_time and end_time:
try:
# Try to parse timestamps
start_dt = None
end_dt = None
# Handle various timestamp formats
for fmt in ["%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d %H:%M:%S"]:
try:
if start_dt is None:
start_dt = datetime.strptime(start_time, fmt)
if end_dt is None:
end_dt = datetime.strptime(end_time, fmt)
except ValueError:
continue
if start_dt and end_dt:
duration = end_dt - start_dt
minutes = duration.total_seconds() / 60
duration_estimate = f"{minutes:.0f}m"
except Exception:
pass
# Classify outcome
outcome = "unknown"
if errors:
# Check if any errors are fatal
fatal_errors = any("405" in e or "permission" in e.lower() or "authentication" in e.lower()
for e in errors)
if fatal_errors:
outcome = "failure"
else:
outcome = "partial"
elif messages:
# Check last message for success indicators
last_msg = messages[-1]
if last_msg.get("role") == "assistant":
content = last_msg.get("content", "")
if isinstance(content, str):
success_indicators = ["done", "completed", "success", "merged", "pushed"]
if any(indicator in content.lower() for indicator in success_indicators):
outcome = "success"
else:
outcome = "unknown"
# Deduplicate key actions (keep unique, limit to 10)
unique_actions = []
for action in key_actions:
if action not in unique_actions:
unique_actions.append(action)
if len(unique_actions) >= 10:
break
# Deduplicate errors (keep unique, limit to 5)
unique_errors = []
for error in errors:
if error not in unique_errors:
unique_errors.append(error)
if len(unique_errors) >= 5:
break
return SessionSummary(
session_id=session_id,
model=model,
repo=repo,
outcome=outcome,
message_count=len(messages),
tool_calls=tool_calls_count,
duration_estimate=duration_estimate,
key_actions=unique_actions,
errors_encountered=unique_errors,
start_time=start_time,
end_time=end_time,
total_tokens_estimate=total_tokens,
user_messages=user_messages,
assistant_messages=assistant_messages,
tool_outputs=tool_outputs
)
def process_session_directory(directory_path: str, output_file: Optional[str] = None) -> List[SessionSummary]:
"""
Process all JSONL files in a directory.
"""
directory = Path(directory_path)
if not directory.exists():
print(f"Error: Directory {directory_path} does not exist", file=sys.stderr)
return []
jsonl_files = list(directory.glob("*.jsonl"))
if not jsonl_files:
print(f"Warning: No JSONL files found in {directory_path}", file=sys.stderr)
return []
summaries = []
for jsonl_file in sorted(jsonl_files):
print(f"Processing {jsonl_file.name}...", file=sys.stderr)
summary = extract_session_metadata(str(jsonl_file))
summaries.append(summary)
if output_file:
with open(output_file, 'w', encoding='utf-8') as f:
json.dump([asdict(s) for s in summaries], f, indent=2)
print(f"Wrote {len(summaries)} summaries to {output_file}", file=sys.stderr)
return summaries
def main():
"""CLI entry point."""
import argparse
parser = argparse.ArgumentParser(description="Extract metadata from Hermes session JSONL transcripts")
parser.add_argument("path", help="Path to JSONL file or directory of session files")
parser.add_argument("-o", "--output", help="Output JSON file (default: stdout)")
parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
args = parser.parse_args()
path = Path(args.path)
if path.is_file():
summary = extract_session_metadata(str(path))
if args.output:
with open(args.output, 'w') as f:
json.dump(asdict(summary), f, indent=2)
print(f"Wrote summary to {args.output}", file=sys.stderr)
else:
print(json.dumps(asdict(summary), indent=2))
elif path.is_dir():
summaries = process_session_directory(str(path), args.output)
if not args.output:
print(json.dumps([asdict(s) for s in summaries], indent=2))
else:
print(f"Error: {args.path} is not a file or directory", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
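
The transcript schema itself is defined by session_reader.py, which is not part of this diff; based on the fields read above, a session file is expected to contain one JSON object per line, roughly like this hypothetical example:

    {"role": "user", "content": "deploy the service", "timestamp": "2025-01-01T12:00:00Z"}
    {"role": "assistant", "model": "hermes-large", "content": "working", "timestamp": "2025-01-01T12:07:00Z", "tool_calls": [{"function": {"name": "read_file"}}, {"function": {"name": "write_file"}}]}
    {"role": "tool", "content": "405 Method Not Allowed", "is_error": true}

Fed such a file (assuming read_session() returns the parsed lines unchanged), extract_session_metadata() would report two tool calls, a 7m duration estimate, and a "failure" outcome, since the 405 error is treated as fatal.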

View File

@@ -1,147 +0,0 @@
#!/usr/bin/env python3
"""Tests for scripts/automation_opportunity_finder.py — 8 tests."""
import json
import os
import sys
import tempfile
sys.path.insert(0, os.path.dirname(__file__) or ".")
import importlib.util
spec = importlib.util.spec_from_file_location(
"aof",
os.path.join(os.path.dirname(__file__) or ".", "automation_opportunity_finder.py"),
)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
def test_analyze_cron_jobs_no_file():
"""Returns empty list when no cron jobs file exists."""
with tempfile.TemporaryDirectory() as tmpdir:
result = mod.analyze_cron_jobs(tmpdir)
assert result == []
print("PASS: test_analyze_cron_jobs_no_file")
def test_analyze_cron_jobs_disabled():
"""Detects disabled cron jobs."""
with tempfile.TemporaryDirectory() as tmpdir:
cron_dir = os.path.join(tmpdir, "cron")
os.makedirs(cron_dir)
jobs = [
{"id": "j1", "name": "backup", "enabled": False, "schedule": "0 * * * *"},
{"id": "j2", "name": "health", "enabled": True, "schedule": "*/5 * * * *"},
]
with open(os.path.join(cron_dir, "jobs.json"), "w") as f:
json.dump(jobs, f)
result = mod.analyze_cron_jobs(tmpdir)
assert any(p["category"] == "cron_disabled" for p in result)
print("PASS: test_analyze_cron_jobs_disabled")
def test_analyze_cron_jobs_errors():
"""Detects cron jobs with error status."""
with tempfile.TemporaryDirectory() as tmpdir:
cron_dir = os.path.join(tmpdir, "cron")
os.makedirs(cron_dir)
jobs = [
{"id": "j1", "name": "broken", "enabled": True, "last_status": "error", "schedule": "0 * * * *"},
]
with open(os.path.join(cron_dir, "jobs.json"), "w") as f:
json.dump(jobs, f)
result = mod.analyze_cron_jobs(tmpdir)
assert any(p["category"] == "cron_errors" for p in result)
print("PASS: test_analyze_cron_jobs_errors")
def test_analyze_documents_finds_todos():
"""Detects TODO markers in documents."""
with tempfile.TemporaryDirectory() as tmpdir:
docs_dir = os.path.join(tmpdir, "docs")
os.makedirs(docs_dir)
for i in range(3):
with open(os.path.join(docs_dir, f"guide{i}.md"), "w") as f:
f.write(f"# Guide {i}\n\nTODO: Automate this step\n")
result = mod.analyze_documents([tmpdir])
assert any(p["category"] == "manual_todo" for p in result)
todo_proposals = [p for p in result if p["category"] == "manual_todo"]
        assert len(todo_proposals[0]["details"]) == 3
print("PASS: test_analyze_documents_finds_todos")
def test_analyze_scripts_repeated_commands():
"""Detects repeated shell commands across scripts."""
with tempfile.TemporaryDirectory() as tmpdir:
scripts_dir = os.path.join(tmpdir, "scripts")
os.makedirs(scripts_dir)
repeated_cmd = "docker restart myapp"
for i in range(4):
with open(os.path.join(scripts_dir, f"deploy{i}.sh"), "w") as f:
f.write(f"#!/bin/bash\n{repeated_cmd}\n")
result = mod.analyze_scripts([tmpdir])
assert any(p["category"] == "repeated_command" for p in result)
print("PASS: test_analyze_scripts_repeated_commands")
def test_analyze_session_transcripts():
"""Detects repeated tool-call sequences."""
with tempfile.TemporaryDirectory() as tmpdir:
sessions_dir = os.path.join(tmpdir, "sessions")
os.makedirs(sessions_dir)
for i in range(4):
with open(os.path.join(sessions_dir, f"session{i}.jsonl"), "w") as f:
f.write(json.dumps({"role": "user", "content": f"task {i}"}) + "\n")
f.write(json.dumps({
"role": "assistant",
"content": "working",
"tool_calls": [
{"function": {"name": "read_file"}},
{"function": {"name": "write_file"}},
]
}) + "\n")
result = mod.analyze_session_transcripts([sessions_dir])
assert any(p["category"] == "tool_sequence" for p in result)
seq_proposals = [p for p in result if p["category"] == "tool_sequence"]
assert any("read_file" in p["title"] and "write_file" in p["title"] for p in seq_proposals)
print("PASS: test_analyze_session_transcripts")
def test_deduplicate_proposals():
"""Deduplicates proposals with similar titles."""
proposals = [
{"title": "TODO found 3 times", "category": "manual_todo", "confidence": 0.7, "impact": "medium", "description": "x", "sources": []},
{"title": "TODO found 3 times", "category": "manual_todo", "confidence": 0.7, "impact": "medium", "description": "x", "sources": []},
{"title": "FIXME found 5 times", "category": "manual_fixme", "confidence": 0.8, "impact": "medium", "description": "y", "sources": []},
]
result = mod.deduplicate_proposals(proposals)
assert len(result) == 2
print("PASS: test_deduplicate_proposals")
def test_rank_proposals():
"""Ranks proposals by impact * confidence."""
proposals = [
{"title": "low", "category": "x", "confidence": 0.9, "impact": "low", "description": "", "sources": []},
{"title": "high", "category": "x", "confidence": 0.8, "impact": "high", "description": "", "sources": []},
{"title": "med", "category": "x", "confidence": 0.7, "impact": "medium", "description": "", "sources": []},
]
result = mod.rank_proposals(proposals)
assert result[0]["title"] == "high"
assert result[-1]["title"] == "low"
print("PASS: test_rank_proposals")
if __name__ == "__main__":
tests = [v for k, v in globals().items() if k.startswith("test_")]
passed = 0
failed = 0
for t in tests:
try:
t()
passed += 1
except Exception as e:
print(f"FAIL: {t.__name__}: {e}")
failed += 1
print(f"\n{passed}/{passed+failed} tests passed")
sys.exit(1 if failed else 0)

View File

@@ -1,129 +0,0 @@
#!/usr/bin/env python3
"""Tests for scripts/knowledge_staleness_check.py — 8 tests."""
import json
import os
import sys
import tempfile
sys.path.insert(0, os.path.dirname(__file__) or ".")
import importlib.util
spec = importlib.util.spec_from_file_location("ks", os.path.join(os.path.dirname(__file__) or ".", "knowledge_staleness_check.py"))
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
check_staleness = mod.check_staleness
fix_hashes = mod.fix_hashes
compute_file_hash = mod.compute_file_hash
def test_fresh_entry():
with tempfile.TemporaryDirectory() as tmpdir:
src = os.path.join(tmpdir, "source.py")
with open(src, "w") as f:
f.write("print('hello')")
h = compute_file_hash(src)
idx = os.path.join(tmpdir, "index.json")
with open(idx, "w") as f:
json.dump({"facts": [{"fact": "hello", "source_file": "source.py", "source_hash": h}]}, f)
results = check_staleness(idx, tmpdir)
assert results[0]["status"] == "fresh"
print("PASS: test_fresh_entry")
def test_stale_entry():
with tempfile.TemporaryDirectory() as tmpdir:
src = os.path.join(tmpdir, "source.py")
with open(src, "w") as f:
f.write("original content")
idx = os.path.join(tmpdir, "index.json")
with open(idx, "w") as f:
json.dump({"facts": [{"fact": "old", "source_file": "source.py", "source_hash": "sha256:wrong"}]}, f)
# Now change the source
with open(src, "w") as f:
f.write("modified content")
results = check_staleness(idx, tmpdir)
assert results[0]["status"] == "stale"
print("PASS: test_stale_entry")
def test_missing_source():
with tempfile.TemporaryDirectory() as tmpdir:
idx = os.path.join(tmpdir, "index.json")
with open(idx, "w") as f:
json.dump({"facts": [{"fact": "gone", "source_file": "nonexistent.py", "source_hash": "sha256:abc"}]}, f)
results = check_staleness(idx, tmpdir)
assert results[0]["status"] == "missing_source"
print("PASS: test_missing_source")
def test_no_hash():
with tempfile.TemporaryDirectory() as tmpdir:
src = os.path.join(tmpdir, "source.py")
with open(src, "w") as f:
f.write("content")
idx = os.path.join(tmpdir, "index.json")
with open(idx, "w") as f:
json.dump({"facts": [{"fact": "no hash", "source_file": "source.py"}]}, f)
results = check_staleness(idx, tmpdir)
assert results[0]["status"] == "no_hash"
assert results[0]["current_hash"].startswith("sha256:")
print("PASS: test_no_hash")
def test_no_source_field():
with tempfile.TemporaryDirectory() as tmpdir:
idx = os.path.join(tmpdir, "index.json")
with open(idx, "w") as f:
json.dump({"facts": [{"fact": "orphan"}]}, f)
results = check_staleness(idx, tmpdir)
assert results[0]["status"] == "no_source"
print("PASS: test_no_source_field")
def test_fix_hashes():
with tempfile.TemporaryDirectory() as tmpdir:
src = os.path.join(tmpdir, "source.py")
with open(src, "w") as f:
f.write("content for hashing")
idx = os.path.join(tmpdir, "index.json")
with open(idx, "w") as f:
json.dump({"facts": [{"fact": "needs hash", "source_file": "source.py"}]}, f)
fixed = fix_hashes(idx, tmpdir)
assert fixed == 1
# Verify hash was added
with open(idx) as f:
data = json.load(f)
assert data["facts"][0]["source_hash"].startswith("sha256:")
print("PASS: test_fix_hashes")
def test_empty_index():
with tempfile.TemporaryDirectory() as tmpdir:
idx = os.path.join(tmpdir, "index.json")
with open(idx, "w") as f:
json.dump({"facts": []}, f)
results = check_staleness(idx, tmpdir)
assert results == []
print("PASS: test_empty_index")
def test_compute_hash_nonexistent():
h = compute_file_hash("/nonexistent/path/file.py")
assert h is None
print("PASS: test_compute_hash_nonexistent")
def run_all():
test_fresh_entry()
test_stale_entry()
test_missing_source()
test_no_hash()
test_no_source_field()
test_fix_hashes()
test_empty_index()
test_compute_hash_nonexistent()
print("\nAll 8 tests passed!")
if __name__ == "__main__":
run_all()