WIP: Gemini Code progress on #1018
Some checks failed
Tests / lint (pull_request) Failing after 34s
Tests / test (pull_request) Has been skipped

Automated salvage commit — agent session ended (exit 124).
Work in progress, may need continuation.
This commit is contained in:
Alexander Whitestone
2026-03-23 14:31:25 -04:00
parent 1be1324a0d
commit aeb906ea9c
2 changed files with 259 additions and 0 deletions

184
scripts/llm_triage.py Normal file
View File

@@ -0,0 +1,184 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ── LLM-based Triage ──────────────────────────────────────────────────────────
#
# A Python script to automate the triage of the backlog using a local LLM.
# This script is intended to be a more robust and maintainable replacement for
# the `deep_triage.sh` script.
#
# ─────────────────────────────────────────────────────────────────────────────
import json
import os
import sys
from pathlib import Path
import ollama
import httpx
# Add src to PYTHONPATH
sys.path.append(str(Path(__file__).parent.parent / "src"))
from config import settings
# ── Constants ────────────────────────────────────────────────────────────────
# All paths are resolved relative to the repository root so the script works
# regardless of the current working directory.
REPO_ROOT = Path(__file__).parent.parent
QUEUE_PATH = REPO_ROOT / ".loop/queue.json"  # backlog queue, JSON list (rewritten each triage)
RETRO_PATH = REPO_ROOT / ".loop/retro/deep-triage.jsonl"  # append-only retro log, one JSON object per line
SUMMARY_PATH = REPO_ROOT / ".loop/retro/summary.json"  # cycle summary injected into the prompt context
PROMPT_PATH = REPO_ROOT / "scripts/deep_triage_prompt.md"  # triage instructions sent to the LLM
DEFAULT_MODEL = "qwen3:30b"  # Ollama model used when --model is not given
class GiteaClient:
    """Minimal Gitea REST API wrapper for creating and closing issues."""

    def __init__(self, url: str, token: str, repo: str):
        self.url = url
        self.token = token
        self.repo = repo
        # Token auth header shared by every request this client makes.
        self.headers = {
            "Authorization": f"token {token}",
            "Content-Type": "application/json",
        }

    def create_issue(self, title: str, body: str) -> None:
        """Creates a new issue."""
        endpoint = f"{self.url}/api/v1/repos/{self.repo}/issues"
        payload = {"title": title, "body": body}
        with httpx.Client() as http:
            resp = http.post(endpoint, headers=self.headers, json=payload)
        # Surface non-2xx responses to the caller as httpx.HTTPStatusError.
        resp.raise_for_status()

    def close_issue(self, issue_id: int) -> None:
        """Closes an issue."""
        endpoint = f"{self.url}/api/v1/repos/{self.repo}/issues/{issue_id}"
        with httpx.Client() as http:
            resp = http.patch(endpoint, headers=self.headers, json={"state": "closed"})
        resp.raise_for_status()
def get_llm_client():
    """Returns an Ollama client."""
    # Default Client() talks to the local Ollama daemon.
    client = ollama.Client()
    return client
def get_prompt() -> str:
    """Return the triage prompt text, or "" if the prompt file is missing.

    Callers treat the empty string as "nothing to do" (see run_triage).
    """
    try:
        return PROMPT_PATH.read_text()
    except FileNotFoundError:
        # Fix: report on stderr, not stdout, so the error is visible even
        # when stdout is piped/captured by the surrounding loop.
        print(f"Error: Prompt file not found at {PROMPT_PATH}", file=sys.stderr)
        return ""
def get_context():
    """Returns the context for the triage prompt."""
    # Each source file is optional: missing files contribute an empty section.
    queue_contents = QUEUE_PATH.read_text() if QUEUE_PATH.exists() else ""
    summary = SUMMARY_PATH.read_text() if SUMMARY_PATH.exists() else ""
    last_retro = ""
    if RETRO_PATH.exists():
        retro_lines = RETRO_PATH.read_text().splitlines(keepends=True)
        if retro_lines:
            # Only the most recent retro entry is injected.
            last_retro = retro_lines[-1]
    return f"""
═══════════════════════════════════════════════════════════════════════════════
CURRENT CONTEXT (auto-injected)
═══════════════════════════════════════════════════════════════════════════════
CURRENT QUEUE (.loop/queue.json):
{queue_contents}
CYCLE SUMMARY (.loop/retro/summary.json):
{summary}
LAST DEEP TRIAGE RETRO:
{last_retro}
Do your work now.
"""
def parse_llm_response(response: str) -> tuple[list, dict]:
    """Parse the LLM's JSON reply into a ``(queue, retro)`` pair.

    Accepts raw JSON or JSON wrapped in a markdown code fence (``` or
    ```json), which chat models frequently emit.  Any failure — invalid
    JSON or a top-level value that is not an object — is reported on
    stderr and yields ``([], {})`` so the caller skips its write-backs.
    """
    text = response.strip()
    # Strip a surrounding markdown fence, e.g. ```json\n{...}\n```
    if text.startswith("```"):
        text = text.split("\n", 1)[1] if "\n" in text else ""
        stripped = text.rstrip()
        if stripped.endswith("```"):
            text = stripped[:-3]
    try:
        data = json.loads(text)
    except json.JSONDecodeError:
        print("Error: Failed to parse LLM response as JSON.", file=sys.stderr)
        return [], {}
    if not isinstance(data, dict):
        # Fix: the original crashed with AttributeError on e.g. a bare list;
        # treat any non-object top level as a parse miss.
        print("Error: Failed to parse LLM response as JSON.", file=sys.stderr)
        return [], {}
    return data.get("queue", []), data.get("retro", {})
def write_queue(queue: list) -> None:
    """Writes the updated queue to disk."""
    # Pretty-printed with indent=2 to keep the file diff-friendly.
    QUEUE_PATH.write_text(json.dumps(queue, indent=2))
def write_retro(retro: dict) -> None:
    """Writes the retro entry to disk."""
    # Append one compact JSON object per line (JSONL format).
    line = json.dumps(retro)
    with RETRO_PATH.open("a") as handle:
        handle.write(line + "\n")
def run_triage(model: str = DEFAULT_MODEL):
    """Runs the triage process.

    Flow: load prompt + context, query the LLM once, persist the returned
    queue and retro entry, then sync any issue changes to Gitea.  LLM and
    Gitea API failures are reported on stderr instead of raising, so the
    surrounding automation loop can continue.
    """
    client = get_llm_client()
    prompt = get_prompt()
    if not prompt:
        # Missing prompt file: nothing sensible to ask the model.
        return
    full_prompt = f"{prompt}\n{get_context()}"
    try:
        response = client.chat(
            model=model,
            messages=[{"role": "user", "content": full_prompt}],
        )
        llm_output = response["message"]["content"]
        queue, retro = parse_llm_response(llm_output)
        if queue:
            write_queue(queue)
        if retro:
            write_retro(retro)
        issues_closed = retro.get("issues_closed", [])
        issues_created = retro.get("issues_created", [])
        if issues_closed or issues_created:
            # Fix: only build the Gitea client when there is actual issue
            # work, so a no-op triage never touches Gitea settings/network.
            gitea_client = GiteaClient(
                url=settings.gitea_url,
                token=settings.gitea_token,
                repo=settings.gitea_repo,
            )
            for issue_id in issues_closed:
                gitea_client.close_issue(issue_id)
            for issue in issues_created:
                gitea_client.create_issue(issue["title"], issue["body"])
    except ollama.ResponseError as e:
        # Fix: errors go to stderr, not stdout.
        print(f"Error: Ollama API request failed: {e}", file=sys.stderr)
    except httpx.HTTPStatusError as e:
        print(f"Error: Gitea API request failed: {e}", file=sys.stderr)
if __name__ == "__main__":
    import argparse

    # CLI entry point: only knob is which local Ollama model to use.
    cli = argparse.ArgumentParser(description="Automated backlog triage using an LLM.")
    cli.add_argument(
        "--model",
        type=str,
        default=DEFAULT_MODEL,
        help=f"The Ollama model to use for triage (default: {DEFAULT_MODEL})",
    )
    options = cli.parse_args()
    run_triage(model=options.model)

View File

@@ -0,0 +1,75 @@
import json
from unittest.mock import MagicMock, patch

import pytest

from scripts.llm_triage import (
    get_context,
    get_prompt,
    parse_llm_response,
    run_triage,
)
# ── Mocks ──────────────────────────────────────────────────────────────────
@pytest.fixture
def mock_files(tmp_path):
    """Creates mock files for the triage script."""
    # Mirror the repo layout the script expects under a throwaway root.
    retro_dir = tmp_path / ".loop/retro"
    retro_dir.mkdir(parents=True)
    scripts_dir = tmp_path / "scripts"
    scripts_dir.mkdir(parents=True)
    (tmp_path / ".loop/queue.json").write_text("[]")
    (retro_dir / "summary.json").write_text("{}")
    (retro_dir / "deep-triage.jsonl").write_text("")
    (scripts_dir / "deep_triage_prompt.md").write_text("This is the prompt.")
    return tmp_path
def test_get_prompt(mock_files):
    """Tests that the prompt is read correctly."""
    prompt_file = mock_files / "scripts/deep_triage_prompt.md"
    with patch("scripts.llm_triage.PROMPT_PATH", prompt_file):
        assert get_prompt() == "This is the prompt."
def test_get_context(mock_files):
    """Tests that the context is constructed correctly.

    Fix: the original asserted on "\\n" (a literal backslash + 'n'), which
    can never appear in the context — the f-string template separates each
    section header from its contents with a real newline.
    """
    with patch("scripts.llm_triage.QUEUE_PATH", mock_files / ".loop/queue.json"), \
         patch("scripts.llm_triage.SUMMARY_PATH", mock_files / ".loop/retro/summary.json"), \
         patch("scripts.llm_triage.RETRO_PATH", mock_files / ".loop/retro/deep-triage.jsonl"):
        context = get_context()
    assert "CURRENT QUEUE (.loop/queue.json):\n[]" in context
    assert "CYCLE SUMMARY (.loop/retro/summary.json):\n{}" in context
    assert "LAST DEEP TRIAGE RETRO:\n" in context
def test_parse_llm_response():
    """Tests that the LLM's response is parsed correctly."""
    raw = '{"queue": [1, 2, 3], "retro": {"a": 1}}'
    parsed_queue, parsed_retro = parse_llm_response(raw)
    assert parsed_queue == [1, 2, 3]
    assert parsed_retro == {"a": 1}
@patch("scripts.llm_triage.settings")
@patch("scripts.llm_triage.get_llm_client")
@patch("scripts.llm_triage.GiteaClient")
def test_run_triage(mock_gitea_client, mock_llm_client, mock_settings, mock_files):
    """Tests the main triage logic.

    Fixes vs. the WIP version:
    - the multi-line ``with patch(...)`` block lacked line continuations
      (SyntaxError — the lint failure in this commit);
    - the retro-file expectation was a string literal broken across source
      lines (SyntaxError);
    - ``write_queue`` serializes with ``indent=2``, so the queue file can
      never equal the compact string the old assertion expected — compare
      parsed JSON instead of exact bytes;
    - ``run_triage`` reads ``settings.gitea_*``; patch ``settings`` so the
      test does not depend on real configuration.
    """
    mock_llm_client.return_value.chat.return_value = {
        "message": {
            "content": '{"queue": [{"issue": 1}], "retro": {"issues_closed": [2], "issues_created": [{"title": "New Issue", "body": "This is a new issue."}]}}'
        }
    }
    with patch("scripts.llm_triage.PROMPT_PATH", mock_files / "scripts/deep_triage_prompt.md"), \
         patch("scripts.llm_triage.QUEUE_PATH", mock_files / ".loop/queue.json"), \
         patch("scripts.llm_triage.SUMMARY_PATH", mock_files / ".loop/retro/summary.json"), \
         patch("scripts.llm_triage.RETRO_PATH", mock_files / ".loop/retro/deep-triage.jsonl"):
        run_triage()
    # Check that the queue and retro files were written.
    assert json.loads((mock_files / ".loop/queue.json").read_text()) == [{"issue": 1}]
    retro_lines = (mock_files / ".loop/retro/deep-triage.jsonl").read_text().splitlines()
    assert json.loads(retro_lines[-1]) == {
        "issues_closed": [2],
        "issues_created": [{"title": "New Issue", "body": "This is a new issue."}],
    }
    # Check that the Gitea client was called correctly.
    mock_gitea_client.return_value.close_issue.assert_called_once_with(2)
    mock_gitea_client.return_value.create_issue.assert_called_once_with(
        "New Issue", "This is a new issue."
    )