Co-authored-by: Google Gemini <gemini@hermes.local> Co-committed-by: Google Gemini <gemini@hermes.local>
185 lines
6.0 KiB
Python
185 lines
6.0 KiB
Python
#!/usr/bin/env python3
|
|
# -*- coding: utf-8 -*-
|
|
# ── LLM-based Triage ──────────────────────────────────────────────────────────
|
|
#
|
|
# A Python script to automate the triage of the backlog using a local LLM.
|
|
# This script is intended to be a more robust and maintainable replacement for
|
|
# the `deep_triage.sh` script.
|
|
#
|
|
# ─────────────────────────────────────────────────────────────────────────────
|
|
|
|
import json
|
|
import os
|
|
import sys
|
|
from pathlib import Path
|
|
import ollama
|
|
import httpx
|
|
|
|
# Add src to PYTHONPATH
|
|
sys.path.append(str(Path(__file__).parent.parent / "src"))
|
|
from config import settings
|
|
|
|
# ── Constants ────────────────────────────────────────────────────────────────
# This file lives in scripts/, so the repo root is two levels up.
REPO_ROOT = Path(__file__).parent.parent
# Work queue the triage loop reads and rewrites (JSON list).
QUEUE_PATH = REPO_ROOT / ".loop/queue.json"
# Append-only log of deep-triage retro entries, one JSON object per line.
RETRO_PATH = REPO_ROOT / ".loop/retro/deep-triage.jsonl"
# Cycle summary produced elsewhere; read-only from this script's perspective.
SUMMARY_PATH = REPO_ROOT / ".loop/retro/summary.json"
# Markdown prompt template fed to the LLM ahead of the injected context.
PROMPT_PATH = REPO_ROOT / "scripts/deep_triage_prompt.md"
# Ollama model used when --model is not supplied on the command line.
DEFAULT_MODEL = "qwen3:30b"
|
class GiteaClient:
    """A minimal client for the Gitea issues API.

    Implements only the two operations the triage loop needs: creating
    and closing issues. Each call opens a short-lived HTTP client and
    raises ``httpx.HTTPStatusError`` on a non-2xx response.
    """

    def __init__(self, url: str, token: str, repo: str, timeout: float = 10.0):
        """Store connection settings.

        Args:
            url: Base URL of the Gitea instance (no trailing slash).
            token: API token; sent as a ``token ...`` Authorization header.
            repo: Repository in ``owner/name`` form.
            timeout: Per-request timeout in seconds (new, defaulted, so
                existing callers are unaffected).
        """
        self.url = url
        self.token = token
        self.repo = repo
        self.timeout = timeout
        self.headers = {
            "Authorization": f"token {token}",
            "Content-Type": "application/json",
        }

    def create_issue(self, title: str, body: str) -> None:
        """Create a new issue in the configured repository.

        Raises:
            httpx.HTTPStatusError: If the API responds with an error status.
        """
        url = f"{self.url}/api/v1/repos/{self.repo}/issues"
        data = {"title": title, "body": body}
        with httpx.Client(timeout=self.timeout) as client:
            response = client.post(url, headers=self.headers, json=data)
            response.raise_for_status()

    def close_issue(self, issue_id: int) -> None:
        """Close an existing issue by its number.

        Raises:
            httpx.HTTPStatusError: If the API responds with an error status.
        """
        url = f"{self.url}/api/v1/repos/{self.repo}/issues/{issue_id}"
        data = {"state": "closed"}
        with httpx.Client(timeout=self.timeout) as client:
            response = client.patch(url, headers=self.headers, json=data)
            response.raise_for_status()
|
|
def get_llm_client():
    """Build and return a client connected to the local Ollama server."""
    llm_client = ollama.Client()
    return llm_client
|
|
|
def get_prompt() -> str:
    """Return the triage prompt text, or ``""`` if the prompt file is missing.

    Callers treat an empty return value as "abort the triage run".
    """
    try:
        return PROMPT_PATH.read_text()
    except FileNotFoundError:
        # Report on stderr so the diagnostic is not mistaken for output.
        print(f"Error: Prompt file not found at {PROMPT_PATH}", file=sys.stderr)
        return ""
|
def get_context():
    """Return the context block appended to the triage prompt.

    Assembles the current queue, the cycle summary, and the most recent
    deep-triage retro entry into a single banner-delimited text block.
    Any missing file is silently treated as empty content.
    """
    queue_contents = ""
    if QUEUE_PATH.exists():
        queue_contents = QUEUE_PATH.read_text()

    # The retro file is JSONL; only the most recent entry is relevant.
    last_retro = ""
    if RETRO_PATH.exists():
        with open(RETRO_PATH, "r") as f:
            lines = f.readlines()
            if lines:
                # readlines() keeps the trailing newline; that is fine here
                # because the value is interpolated into a text prompt.
                last_retro = lines[-1]

    summary = ""
    if SUMMARY_PATH.exists():
        summary = SUMMARY_PATH.read_text()

    return f"""
═══════════════════════════════════════════════════════════════════════════════
CURRENT CONTEXT (auto-injected)
═══════════════════════════════════════════════════════════════════════════════

CURRENT QUEUE (.loop/queue.json):
{queue_contents}

CYCLE SUMMARY (.loop/retro/summary.json):
{summary}

LAST DEEP TRIAGE RETRO:
{last_retro}

Do your work now.
"""
|
def parse_llm_response(response: str) -> tuple[list, dict]:
    """Parse the LLM's response into ``(queue, retro)``.

    The response is expected to be a JSON object with optional ``queue``
    (list) and ``retro`` (dict) keys. On any parse failure the function
    degrades to ``([], {})`` so the caller can skip the write phase.
    """
    try:
        data = json.loads(response)
    except json.JSONDecodeError:
        print("Error: Failed to parse LLM response as JSON.", file=sys.stderr)
        return [], {}
    # Guard against valid JSON that is not an object (e.g. a bare list);
    # the original code would crash on `.get` with an AttributeError.
    if not isinstance(data, dict):
        print("Error: LLM response is valid JSON but not an object.", file=sys.stderr)
        return [], {}
    return data.get("queue", []), data.get("retro", {})
|
def write_queue(queue: list) -> None:
    """Persist the updated queue to disk as pretty-printed JSON.

    Creates the containing ``.loop`` directory if it does not exist yet,
    so a fresh checkout does not fail with FileNotFoundError on open().
    """
    QUEUE_PATH.parent.mkdir(parents=True, exist_ok=True)
    with open(QUEUE_PATH, "w") as f:
        json.dump(queue, f, indent=2)
|
def write_retro(retro: dict) -> None:
    """Append one retro entry to the JSONL log.

    Creates the containing ``.loop/retro`` directory if it does not exist
    yet, so a fresh checkout does not fail with FileNotFoundError on open().
    """
    RETRO_PATH.parent.mkdir(parents=True, exist_ok=True)
    with open(RETRO_PATH, "a") as f:
        json.dump(retro, f)
        f.write("\n")
|
def run_triage(model: str = DEFAULT_MODEL):
    """Run one triage cycle.

    Sends the prompt plus auto-injected context to the LLM, writes the
    returned queue and retro entry to disk, then mirrors any issue actions
    to Gitea. API failures are reported on stderr and do not raise.

    Args:
        model: Name of the Ollama model to use.
    """
    client = get_llm_client()
    prompt = get_prompt()
    if not prompt:
        # get_prompt() already reported the missing prompt file.
        return

    context = get_context()
    full_prompt = f"{prompt}\n{context}"

    try:
        response = client.chat(
            model=model,
            messages=[
                {
                    "role": "user",
                    "content": full_prompt,
                },
            ],
        )
        llm_output = response["message"]["content"]
        queue, retro = parse_llm_response(llm_output)

        if queue:
            write_queue(queue)

        if retro:
            write_retro(retro)

        _sync_gitea_issues(retro)

    except ollama.ResponseError as e:
        print(f"Error: Ollama API request failed: {e}", file=sys.stderr)
    except httpx.HTTPStatusError as e:
        print(f"Error: Gitea API request failed: {e}", file=sys.stderr)


def _sync_gitea_issues(retro: dict) -> None:
    """Mirror the retro's issue actions to Gitea.

    Closes every issue listed under ``issues_closed`` and creates every
    issue under ``issues_created``. Does nothing (and builds no client)
    when the retro carries no issue actions.
    """
    issues_closed = retro.get("issues_closed", [])
    issues_created = retro.get("issues_created", [])
    if not (issues_closed or issues_created):
        return

    gitea_client = GiteaClient(
        url=settings.gitea_url,
        token=settings.gitea_token,
        repo=settings.gitea_repo,
    )

    for issue_id in issues_closed:
        gitea_client.close_issue(issue_id)

    for issue in issues_created:
        gitea_client.create_issue(issue["title"], issue["body"])
|
if __name__ == "__main__":
    import argparse

    # CLI entry point: the only knob exposed is which Ollama model to run.
    arg_parser = argparse.ArgumentParser(
        description="Automated backlog triage using an LLM."
    )
    arg_parser.add_argument(
        "--model",
        type=str,
        default=DEFAULT_MODEL,
        help=f"The Ollama model to use for triage (default: {DEFAULT_MODEL})",
    )
    cli_args = arg_parser.parse_args()

    run_triage(model=cli_args.model)