#!/usr/bin/env python3
"""
Local Gitea Issue Processor — uses Hermes with local Ollama models.

Usage:
    ./bin/local-issue-processor.py                    # process one pending issue
    ./bin/local-issue-processor.py --issue 325        # process specific issue
    ./bin/local-issue-processor.py --dry-run          # preview only
    ./bin/local-issue-processor.py --benchmark-only   # run tok/s benchmark

Prereqs:
    - Ollama running: ollama serve
    - Models pulled: ollama pull gemma4 hermes3:8b hermes4:14b
    - Hermes on PATH
    - TIMMY_ENV=local-ollama ./deploy.sh run first
"""
import argparse
import json
import os
import subprocess
import sys
import urllib.request

from pathlib import Path
# Hermes CLI binary; override with the HERMES_BIN env var.
HERMES_BIN = os.environ.get('HERMES_BIN', 'hermes')

# Environment overrides that point Hermes at a local Ollama server
# (Ollama exposes an OpenAI-compatible API on port 11434).
LOCAL_ENV = {
    'HERMES_MODEL': os.environ.get('HERMES_MODEL', 'gemma4'),
    'HERMES_PROVIDER': 'custom',
    'HERMES_BASE_URL': 'http://localhost:11434/v1',
}

# Gitea API access: token file location, plus repo slug and base URL
# (the latter two are env-overridable).
GITEA_TOKEN_PATH = Path.home() / '.hermes' / 'gitea_token'
GITEA_REPO = os.environ.get('GITEA_REPO', 'Timmy_Foundation/timmy-config')
GITEA_URL = os.environ.get('GITEA_URL', 'https://forge.alexanderwhitestone.com')
|
def hermes_local(prompt):
    """Run one Hermes chat turn against the local Ollama backend.

    Args:
        prompt: User prompt text; tagged with ``[local-gitea]`` for traceability.

    Returns:
        The model reply as a string with any ``session_id:`` lines stripped,
        or ``None`` when the subprocess fails or Hermes exits nonzero.
    """
    env = os.environ.copy()
    env.update(LOCAL_ENV)  # route this call through the local Ollama server
    tagged = f"[local-gitea] {prompt}"
    try:
        res = subprocess.run(
            [HERMES_BIN, 'chat', '-q', tagged, '-Q', '-t', 'none'],
            capture_output=True, text=True, timeout=120, env=env
        )
    except (subprocess.SubprocessError, OSError) as e:
        # Narrowed from bare Exception: covers timeout, missing binary,
        # and exec failures without hiding unrelated programming errors.
        print(f"hermes call failed: {e}", file=sys.stderr)
        return None
    if res.returncode == 0:
        # Drop session bookkeeping lines the CLI may prepend.
        lines = [l for l in res.stdout.strip().split('\n') if not l.startswith('session_id:')]
        return '\n'.join(lines).strip()
    # Previously a nonzero exit returned None silently; surface stderr for debugging.
    print(f"hermes exited {res.returncode}: {res.stderr.strip()}", file=sys.stderr)
    return None
|
def fetch_issues():
    """Fetch open issues from the configured Gitea repository.

    Requires a module-level ``import urllib.request`` — the original file only
    imported it inside ``main()``, so this function raised NameError at runtime.

    Returns:
        A list of issue dicts (at most 50 open issues), or ``[]`` on any
        network/parse error (best-effort by design — errors are logged).

    Exits:
        With status 1 when the API token file is missing.
    """
    if not GITEA_TOKEN_PATH.exists():
        print(f"ERROR: Token missing at {GITEA_TOKEN_PATH}", file=sys.stderr)
        sys.exit(1)
    token = GITEA_TOKEN_PATH.read_text().strip()
    req = urllib.request.Request(
        f"{GITEA_URL}/api/v1/repos/{GITEA_REPO}/issues?state=open&limit=50",
        headers={'Authorization': f'token {token}'}
    )
    try:
        with urllib.request.urlopen(req, timeout=15) as resp:
            return json.loads(resp.read())
    except Exception as e:
        # Deliberate best-effort: a fetch failure degrades to "no issues".
        print(f"Gitea fetch error: {e}", file=sys.stderr)
        return []
|
def main():
    """Entry point: select a Gitea issue, route it through local Hermes, report intent.

    Returns:
        Exit code — 0 on success (or benchmark stub), 1 when no issue is
        available or the requested issue is not found, 3 when the model
        response cannot be parsed.
    """
    p = argparse.ArgumentParser()
    p.add_argument('--issue', type=int)
    p.add_argument('--dry-run', action='store_true')
    p.add_argument('--benchmark-only', action='store_true')
    args = p.parse_args()

    if args.benchmark_only:
        print("Benchmark mode not implemented yet — run manually with time hermes chat")
        return 0

    # NOTE: the original held `import urllib.request` here, which only bound
    # `urllib` in main()'s local scope — fetch_issues() needs the module-level
    # import, which now lives at the top of the file.
    issues = fetch_issues()
    if not issues:
        print("No open issues.")
        return 1

    # Explicit --issue selects by number; otherwise take the first open issue.
    target = next((i for i in issues if i['number'] == args.issue), None) if args.issue else issues[0]
    if not target:
        print("Issue not found.")
        return 1

    print(f"→ Processing Issue #{target['number']}: {target.get('title','')}")
    prompt = f"Process Gitea Issue #{target['number']}: {target.get('title','')}\n\nBody:\n{target.get('body','')}\n\nRespond with exactly one line: either 'CLOSE <summary>' or 'COMMENT <text>'."
    resp = hermes_local(prompt)
    print(f"Model response: {resp}")

    # Minimal implementation — demonstrate routing works
    if resp and resp.strip():
        words = resp.strip().split()  # split once instead of re-splitting per branch
        cmd = words[0].upper()
        rest = ' '.join(words[1:])
        if cmd == 'CLOSE':
            summary = rest if rest else 'Resolved locally'
            # BUG FIX: original nested print() inside print()'s else-branch,
            # which printed the message and then a stray "None".
            print(f"[DRY-RUN] Would close issue: {summary}" if args.dry_run
                  else f"✅ Would close: {summary}")
            return 0
        elif cmd == 'COMMENT':
            print(f"[DRY-RUN] Would comment: {rest}" if args.dry_run
                  else "✅ Would comment")
            return 0

    print("Model response unclear — check local inference setup.", file=sys.stderr)
    return 3
|
if __name__ == '__main__':
    # Propagate main()'s return code as the process exit status so shell
    # callers (e.g. cron or deploy scripts) can branch on success/failure.
    sys.exit(main())