Files
hermes-agent/tools/scavenger_fixer.py
Google AI Agent 81f7347bcb
All checks were successful
Lint / lint (pull_request) Successful in 22s
feat: Scavenger Fixer — Autonomous tech debt healing
2026-04-22 03:15:17 +00:00

106 lines
3.4 KiB
Python

#!/usr/bin/env python3
"""
Autonomous Scavenger Fixer — Closing the loop on tech debt.
Uses the Sovereign Scavenger to find debt, the GOFAI sentries to verify context,
and the LLM to propose and apply fixes.
"""
import logging
import sys
from typing import Optional

from agent.auxiliary_client import call_llm
from tools.registry import registry, tool_error, tool_result
logger = logging.getLogger(__name__)
# Tool-calling schema advertised to the LLM for the `scavenger_fix` tool.
# Both parameters are optional: omitting `target_file` lets the scan pick a target.
SCAVENGER_FIX_SCHEMA = {
    "name": "scavenger_fix",
    "description": "Autonomous 'Heal' mode. Scans for tech debt using the Scavenger, picks a high-confidence target, and attempts to fix it autonomously using the GOFAI-LLM hybrid loop.",
    "parameters": {
        "type": "object",
        "properties": {
            "target_file": {
                "type": "string",
                "description": "Specific file to focus on. If omitted, it scans and picks one.",
            },
            "max_fixes": {
                "type": "integer",
                "description": "Maximum number of items to fix in one run.",
                "default": 1,
            },
        },
    },
}
async def autonomous_fix(target_file: Optional[str] = None, max_fixes: int = 1):
    """Find one tech-debt item and ask the LLM to propose a fix for it.

    Args:
        target_file: Restrict candidates to items in this file. If ``None``,
            the first item from a full scan is used.
        max_fixes: Reserved for future batch healing. Currently exactly one
            item is addressed per run regardless of this value.

    Returns:
        A ``tool_result``/``tool_error`` dict describing the outcome: either a
        proposed fix for the chosen target, or a status explaining why no fix
        was attempted.
    """
    # 1. Run the Scavenger scan to enumerate tech-debt items.
    scavenger = registry.get("sovereign_scavenger")
    if not scavenger:
        return tool_error("Sovereign Scavenger tool not found.")
    scan_res = scavenger.handler({"path": ".", "create_issues": False})
    if scan_res.get("status") == "Clean":
        return tool_result(status="Healthy", message="No tech debt found to heal.")
    items = scan_res.get("items", [])
    if target_file:
        items = [i for i in items if i["file"] == target_file]
    if not items:
        return tool_result(status="No Targets", message="No matching tech debt items found.")

    # 2. Pick a target — the scanner's first item is treated as highest priority.
    target = items[0]
    file_path = target["file"]
    line_no = target["line"]
    item_type = target["type"]
    item_msg = target["message"]
    logger.info("Targeting %s in %s:%s...", item_type, file_path, line_no)

    # 3. Read ±10 lines of context around the finding. Use a context manager
    # (the original leaked the file handle) and an explicit encoding.
    try:
        with open(file_path, "r", encoding="utf-8") as fh:
            lines = fh.read().split("\n")
        context = "\n".join(lines[max(0, line_no - 10):min(len(lines), line_no + 10)])
    except Exception as e:
        return tool_error(f"Failed to read context from {file_path}: {e}")

    # 4. Ask the LLM for a fix proposal.
    prompt = f"""
I found a {item_type} in {file_path} at line {line_no}.
Comment: {item_msg}
Context:
{context}
Please provide a fix for this tech debt. Return the full updated snippet for these lines.
"""
    # Using Local Inference if available, else Cloud
    try:
        response = call_llm(
            task="scavenger_fix",
            messages=[{"role": "user", "content": prompt}]
        )
        fix_proposal = response.choices[0].message.content
    except Exception as e:
        return tool_error(f"LLM failed to propose fix: {e}")

    # 5. Apply and Verify (GOFAI step)
    # For now, we report the proposal. In a real autonomous loop, we'd use a patch tool.
    return tool_result(
        status="Fix Proposed",
        target=target,
        proposal=fix_proposal,
        recommendation="The fleet has proposed a fix for this tech debt. Apply it using the 'patch_file' tool."
    )
def _handle_scavenger_fix(args, **kwargs):
    """Synchronous registry entry point: run the async fixer to completion."""
    import asyncio

    requested_file = args.get("target_file")
    fix_budget = args.get("max_fixes", 1)
    return asyncio.run(autonomous_fix(requested_file, fix_budget))
# Expose the fixer on the "dispatch" toolset so the agent can invoke it.
registry.register(
    name="scavenger_fix",
    toolset="dispatch",
    schema=SCAVENGER_FIX_SCHEMA,
    handler=_handle_scavenger_fix,
    emoji="🩹",
)