Compare commits

..

1 Commits

Author SHA1 Message Date
Alexander Whitestone
0fcef1839e feat: PR triage automation script (#659)
Some checks failed
Architecture Lint / Linter Tests (pull_request) Successful in 30s
PR Checklist / pr-checklist (pull_request) Failing after 3s
Smoke Test / smoke (pull_request) Failing after 27s
Validate Config / YAML Lint (pull_request) Failing after 17s
Validate Config / JSON Validate (pull_request) Successful in 22s
Validate Config / Python Syntax & Import Check (pull_request) Failing after 1m28s
Validate Config / Shell Script Lint (pull_request) Failing after 1m18s
Validate Config / Cron Syntax Check (pull_request) Successful in 10s
Validate Config / Deploy Script Dry Run (pull_request) Successful in 13s
Validate Config / Playbook Schema Validation (pull_request) Successful in 22s
Architecture Lint / Lint Repository (pull_request) Has been cancelled
Validate Config / Python Test Suite (pull_request) Has been cancelled
- scripts/pr_triage.py: fetch, categorize, deduplicate, report
- Categories: training-data, bug-fix, feature, maintenance, documentation, testing, infrastructure
- Duplicate detection: PRs referencing same issue
- Health checks: stale (>7d), closed issue refs, mergeable status
- Markdown report + JSON output
- 8 tests pass
2026-04-14 22:20:06 -04:00
8 changed files with 221 additions and 485 deletions

View File

@@ -1,4 +1,3 @@
#!/usr/bin/env python3
"""
Full Nostr agent-to-agent communication demo - FINAL WORKING
"""

View File

@@ -1,4 +1,3 @@
#!/usr/bin/env python3
"""
Soul Eval Gate — The Conscience of the Training Pipeline

View File

@@ -1,4 +1,3 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision

View File

@@ -1,480 +0,0 @@
#!/usr/bin/env python3
"""
config_drift_detector.py — Detect config drift across fleet nodes.
Collects config from all wizard nodes via SSH, compares against
canonical timmy-config golden state, and reports differences.
Usage:
python3 scripts/config_drift_detector.py # Report only
python3 scripts/config_drift_detector.py --auto-sync # Auto-fix drift with golden state
python3 scripts/config_drift_detector.py --node allegro # Check single node
python3 scripts/config_drift_detector.py --json # JSON output for automation
Exit codes:
0 — no drift detected
1 — drift detected
2 — error (SSH failure, missing deps, etc.)
"""
import argparse
import json
import os
import subprocess
import sys
import tempfile
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any
import yaml
# ── Constants ─────────────────────────────────────────────────────────────────
SCRIPT_DIR = Path(__file__).resolve().parent  # directory containing this script
REPO_ROOT = SCRIPT_DIR.parent  # repo root: one level above scripts/
ANSIBLE_INVENTORY = REPO_ROOT / "ansible" / "inventory" / "hosts.yml"
GOLDEN_STATE_PLAYBOOK = REPO_ROOT / "ansible" / "playbooks" / "golden_state.yml"
# Config files to check on each node ("{name}" is replaced with the node name)
CONFIG_PATHS = [
    ".hermes/config.yaml",
    "wizards/{name}/config.yaml",
]
# Keys that define golden state (from ansible inventory vars)
# NOTE(review): GOLDEN_KEYS is never referenced elsewhere in this module —
# confirm whether it is dead code or consumed by an external importer.
GOLDEN_KEYS = [
    "providers",
    "provider",
    "model",
    "base_url",
    "api_key_env",
    "banned_providers",
    "banned_models_patterns",
]
# ── Data Models ───────────────────────────────────────────────────────────────
@dataclass
class NodeConfig:
    """Config state collected from a single fleet node (locally or over SSH)."""

    name: str  # inventory node name
    host: str  # ansible_host value ("localhost" means run commands locally)
    # Parsed YAML keyed by resolved remote path; the special key
    # "__banned_scan__" holds the grep results from _remote_grep.
    configs: dict[str, Any] = field(default_factory=dict)
    errors: list[str] = field(default_factory=list)  # e.g. YAML parse failures
    # NOTE(review): nothing in this file ever sets this to False, yet main()
    # branches on it to print "UNREACHABLE" — confirm intended wiring.
    reachable: bool = True
@dataclass
class DriftResult:
    """One detected difference between a node's config and the golden state."""

    node: str  # node name the drift was found on
    file_path: str  # config file (or "(config files)" for grep-scan hits)
    diff_type: str  # "missing", "value_mismatch", "key_missing", "extra_key"
    key: str  # config key (or synthetic key such as "provider_chain")
    canonical_value: Any = None  # expected value from golden state, if known
    node_value: Any = None  # actual value observed on the node, if known
    severity: str = "warning"  # "info", "warning", "critical"
# ── Inventory Parsing ─────────────────────────────────────────────────────────
def load_inventory() -> tuple[dict, dict]:
    """Load the Ansible inventory and extract wizard node definitions.

    Returns:
        ``(nodes, global_vars)`` — ``nodes`` maps node name to connection and
        path info (host, user, homes, machine type); ``global_vars`` is the
        inventory-wide ``all.vars`` mapping.

    Exits the process with code 2 when the inventory file is missing.
    """
    if not ANSIBLE_INVENTORY.exists():
        print(f"ERROR: Inventory not found at {ANSIBLE_INVENTORY}", file=sys.stderr)
        sys.exit(2)
    with open(ANSIBLE_INVENTORY) as f:
        # safe_load returns None for an empty file; normalize to {} so the
        # .get() chain below doesn't raise AttributeError.
        inventory = yaml.safe_load(f) or {}
    # Hosts live under all -> children -> wizards -> hosts in this layout.
    wizards = inventory.get("all", {}).get("children", {}).get("wizards", {}).get("hosts", {})
    global_vars = inventory.get("all", {}).get("vars", {})
    nodes = {}
    for name, config in wizards.items():
        nodes[name] = {
            "host": config.get("ansible_host", "localhost"),
            "user": config.get("ansible_user", ""),
            "wizard_name": config.get("wizard_name", name),
            "hermes_home": config.get("hermes_home", "~/.hermes"),
            "wizard_home": config.get("wizard_home", f"~/wizards/{name}"),
            "machine_type": config.get("machine_type", "unknown"),
        }
    return nodes, global_vars
def load_golden_state(inventory_vars: dict) -> dict:
    """Build the canonical golden-state dict from inventory-level vars.

    Missing vars default to empty lists so downstream comparisons never
    have to special-case an absent key.
    """
    mapping = (
        ("providers", "golden_state_providers"),
        ("banned_providers", "banned_providers"),
        ("banned_models_patterns", "banned_models_patterns"),
    )
    return {dest: inventory_vars.get(src, []) for dest, src in mapping}
# ── SSH Collection ────────────────────────────────────────────────────────────
def ssh_collect(node_name: str, node_info: dict, timeout: int = 15) -> NodeConfig:
    """SSH into a node (or run locally) and collect its config files.

    Fetches each template in CONFIG_PATHS, parses the YAML, and also runs a
    grep scan for banned provider strings under the node's hermes home.
    Missing files are silently skipped; YAML parse failures are recorded in
    ``result.errors``.
    """
    host = node_info["host"]
    user = node_info.get("user", "")
    hermes_home = node_info.get("hermes_home", "~/.hermes")
    wizard_home = node_info.get("wizard_home", f"~/wizards/{node_name}")
    result = NodeConfig(name=node_name, host=host)
    # Build SSH target; None means "run commands on this machine".
    if host in ("localhost", "127.0.0.1"):
        ssh_target = None  # local
    else:
        ssh_target = f"{user}@{host}" if user else host
    # Collect each config path
    for path_template in CONFIG_PATHS:
        # Resolve path template ({name} -> node name)
        remote_path = path_template.replace("{name}", node_name)
        if not remote_path.startswith("/"):
            # Resolve relative to home, honoring per-node overrides for the
            # wizard and hermes home directories.
            if "wizards/" in remote_path:
                full_path = f"{wizard_home}/config.yaml"
            else:
                full_path = f"{hermes_home}/config.yaml" if ".hermes" in remote_path else f"~/{remote_path}"
        else:
            full_path = remote_path
        config_content = _remote_cat(ssh_target, full_path, timeout)
        if config_content is not None:
            try:
                parsed = yaml.safe_load(config_content)
                if parsed:  # skip empty/whitespace-only files
                    result.configs[full_path] = parsed
            except yaml.YAMLError as e:
                result.errors.append(f"YAML parse error in {full_path}: {e}")
        # Don't flag missing files as errors — some paths may not exist on all nodes
    # Also collect banned provider scan (raw grep hits, consumed by detect_drift)
    banned_check = _remote_grep(
        ssh_target,
        hermes_home,
        r"anthropic|claude-sonnet|claude-opus|claude-haiku",
        timeout
    )
    if banned_check:
        result.configs["__banned_scan__"] = banned_check
    # NOTE(review): result.reachable stays True even when every collection
    # attempt failed — main() can therefore never report "UNREACHABLE";
    # confirm whether reachability should be detected here.
    return result
def _remote_cat(ssh_target: str | None, path: str, timeout: int) -> str | None:
"""Cat a file remotely (or locally)."""
if ssh_target is None:
cmd = ["cat", path]
else:
cmd = ["ssh", "-o", "ConnectTimeout=5", "-o", "StrictHostKeyChecking=no",
ssh_target, f"cat {path}"]
try:
proc = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout)
if proc.returncode == 0:
return proc.stdout
except subprocess.TimeoutExpired:
pass
except FileNotFoundError:
pass
return None
def _remote_grep(ssh_target: str | None, base_path: str, pattern: str, timeout: int) -> dict:
"""Grep for banned patterns in config files."""
if ssh_target is None:
cmd = ["grep", "-rn", "-i", pattern, base_path, "--include=*.yaml", "--include=*.yml"]
else:
cmd = ["ssh", "-o", "ConnectTimeout=5", "-o", "StrictHostKeyChecking=no",
ssh_target, f"grep -rn -i '{pattern}' {base_path} --include='*.yaml' --include='*.yml' 2>/dev/null || true"]
try:
proc = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout)
if proc.stdout.strip():
lines = proc.stdout.strip().split("\n")
return {"matches": lines, "count": len(lines)}
except subprocess.TimeoutExpired:
pass
return {}
# ── Drift Detection ───────────────────────────────────────────────────────────
def detect_drift(nodes: list[NodeConfig], golden: dict) -> list[DriftResult]:
    """Compare each node's collected config against the golden state.

    Emits DriftResults for: raw grep hits on banned provider strings,
    provider-chain ordering mismatches, banned providers appearing in a
    node's provider list, and (heuristically) missing critical keys.
    Unreachable nodes are skipped.
    """
    results = []
    for node in nodes:
        if not node.reachable:
            continue
        # Check for banned providers (grep hits stored by ssh_collect under
        # the synthetic "__banned_scan__" key)
        banned_scan = node.configs.get("__banned_scan__", {})
        if banned_scan.get("count", 0) > 0:
            for match in banned_scan.get("matches", []):
                results.append(DriftResult(
                    node=node.name,
                    file_path="(config files)",
                    diff_type="banned_provider_found",
                    key="banned_provider_reference",
                    node_value=match,
                    severity="critical"
                ))
        # Check each config file
        for path, config in node.configs.items():
            if path == "__banned_scan__":
                continue
            # Check provider chain (only meaningful for dict-shaped configs)
            if isinstance(config, dict):
                node_providers = _extract_provider_chain(config)
                golden_providers = golden.get("providers", [])
                if node_providers and golden_providers:
                    # Compare provider names in order — ordering is part of
                    # the golden-state contract (fallback priority).
                    node_names = [p.get("name", "") for p in node_providers]
                    golden_names = [p.get("name", "") for p in golden_providers]
                    if node_names != golden_names:
                        results.append(DriftResult(
                            node=node.name,
                            file_path=path,
                            diff_type="value_mismatch",
                            key="provider_chain",
                            canonical_value=golden_names,
                            node_value=node_names,
                            severity="critical"
                        ))
                # Check for banned providers in node config (substring match
                # on both provider name and model string)
                for banned in golden.get("banned_providers", []):
                    for provider in node_providers:
                        prov_name = provider.get("name", "").lower()
                        prov_model = provider.get("model", "").lower()
                        if banned in prov_name or banned in prov_model:
                            results.append(DriftResult(
                                node=node.name,
                                file_path=path,
                                diff_type="banned_provider_found",
                                key=f"provider.{provider.get('name', 'unknown')}",
                                node_value=provider,
                                severity="critical"
                            ))
                # Check for missing critical keys
                critical_keys = ["display", "providers", "tools", "delegation"]
                for key in critical_keys:
                    # NOTE(review): `key in str(config)` matches the key as a
                    # substring anywhere in the config's repr (nested keys,
                    # values, even parts of other words), not "present in
                    # golden state" — looks like a heuristic or a bug; confirm
                    # the intended condition.
                    if key not in config and key in str(config):
                        results.append(DriftResult(
                            node=node.name,
                            file_path=path,
                            diff_type="key_missing",
                            key=key,
                            canonical_value="(present in golden state)",
                            severity="warning"
                        ))
    return results
def _extract_provider_chain(config: dict) -> list[dict]:
"""Extract provider list from a config dict (handles multiple formats)."""
# Direct providers key
if "providers" in config:
providers = config["providers"]
if isinstance(providers, list):
return providers
# Nested in display or model config
for key in ["model", "inference", "llm"]:
if key in config and isinstance(config[key], dict):
if "providers" in config[key]:
return config[key]["providers"]
# Single provider format
if "provider" in config and "model" in config:
return [{"name": config["provider"], "model": config["model"]}]
return []
# ── Auto-Sync ─────────────────────────────────────────────────────────────────
def auto_sync(drifts: list[DriftResult], nodes: list[NodeConfig]) -> list[str]:
    """Re-apply the golden-state playbook to every node with critical drift.

    Only nodes carrying at least one critical DriftResult are synced.
    Returns a human-readable action log, one entry per step taken.
    """
    log = []
    critical_nodes = {d.node for d in drifts if d.severity == "critical"}
    if not critical_nodes:
        log.append("No critical drift to sync.")
        return log
    for node_name in critical_nodes:
        # Skip drift entries whose node is not in the collected set
        matched = next((n for n in nodes if n.name == node_name), None)
        if matched is None:
            continue
        log.append(f"[{node_name}] Running golden state sync...")
        # Limit the playbook run to this node and the "golden" tag
        playbook_cmd = [
            "ansible-playbook",
            str(GOLDEN_STATE_PLAYBOOK),
            "-i", str(ANSIBLE_INVENTORY),
            "-l", node_name,
            "--tags", "golden",
        ]
        try:
            proc = subprocess.run(
                playbook_cmd, capture_output=True, text=True, timeout=120,
                cwd=str(REPO_ROOT)
            )
        except subprocess.TimeoutExpired:
            log.append(f"[{node_name}] Sync timed out after 120s.")
            continue
        except FileNotFoundError:
            log.append(f"[{node_name}] ansible-playbook not found. Install Ansible or run manually.")
            continue
        if proc.returncode == 0:
            log.append(f"[{node_name}] Sync completed successfully.")
        else:
            log.append(f"[{node_name}] Sync FAILED: {proc.stderr[:200]}")
    return log
# ── Reporting ─────────────────────────────────────────────────────────────────
def print_report(drifts: list[DriftResult], nodes: list[NodeConfig], golden: dict):
    """Print a human-readable drift report to stdout.

    Args:
        drifts: Detected drift entries (may be empty).
        nodes: All NodeConfig objects that were checked.
        golden: Golden-state dict (providers / banned_providers / patterns).
    """
    print("=" * 70)
    print("CONFIG DRIFT DETECTION REPORT")
    print("=" * 70)
    print()
    # Summary
    reachable = sum(1 for n in nodes if n.reachable)
    print(f"Nodes checked: {len(nodes)} (reachable: {reachable})")
    # Bug fix: was ''.join(...), which ran provider names together ("ab"
    # instead of "a, b") — use ', ' to match the banned-providers line below.
    print(f"Golden state providers: {', '.join(p['name'] for p in golden.get('providers', []))}")
    print(f"Banned providers: {', '.join(golden.get('banned_providers', []))}")
    print()
    if not drifts:
        print("[OK] No config drift detected. All nodes match golden state.")
        return
    # Group by node for readable per-node sections
    by_node: dict[str, list[DriftResult]] = {}
    for d in drifts:
        by_node.setdefault(d.node, []).append(d)
    for node_name, node_drifts in sorted(by_node.items()):
        print(f"--- {node_name} ---")
        for d in node_drifts:
            severity_icon = {"critical": "[!!]", "warning": "[!]", "info": "[i]"}.get(d.severity, "[?]")
            print(f" {severity_icon} {d.diff_type}: {d.key}")
            if d.canonical_value is not None:
                print(f" canonical: {d.canonical_value}")
            if d.node_value is not None:
                print(f" actual: {d.node_value}")
        print()
    # Severity summary
    critical = sum(1 for d in drifts if d.severity == "critical")
    warning = sum(1 for d in drifts if d.severity == "warning")
    print(f"Total: {len(drifts)} drift(s) — {critical} critical, {warning} warning")
def print_json_report(drifts: list[DriftResult], nodes: list[NodeConfig], golden: dict):
    """Emit the drift report as JSON on stdout for automation consumers."""
    drift_rows = []
    for d in drifts:
        drift_rows.append({
            "node": d.node,
            "file": d.file_path,
            "type": d.diff_type,
            "key": d.key,
            "canonical": d.canonical_value,
            "actual": d.node_value,
            "severity": d.severity,
        })
    payload = {
        "nodes_checked": len(nodes),
        "reachable": sum(1 for n in nodes if n.reachable),
        "golden_providers": [p["name"] for p in golden.get("providers", [])],
        "drift_count": len(drifts),
        "critical_count": sum(1 for d in drifts if d.severity == "critical"),
        "drifts": drift_rows,
    }
    # default=str keeps arbitrary values (lists, dataclass payloads) serializable
    print(json.dumps(payload, indent=2, default=str))
# ── CLI ───────────────────────────────────────────────────────────────────────
def main():
    """CLI entry point: collect configs, detect drift, report, optionally sync.

    Exit codes: 0 no drift, 1 drift detected, 2 usage/inventory error.
    """
    parser = argparse.ArgumentParser(description="Detect config drift across fleet nodes")
    parser.add_argument("--node", help="Check only this node")
    parser.add_argument("--auto-sync", action="store_true", help="Auto-fix critical drift with golden state")
    parser.add_argument("--json", action="store_true", help="JSON output")
    parser.add_argument("--timeout", type=int, default=15, help="SSH timeout per node (seconds)")
    args = parser.parse_args()
    # Load inventory (progress goes to stderr so stdout stays machine-readable)
    print("Loading inventory...", file=sys.stderr)
    node_defs, global_vars = load_inventory()
    golden = load_golden_state(global_vars)
    # Filter to single node if requested
    if args.node:
        if args.node not in node_defs:
            print(f"ERROR: Node '{args.node}' not in inventory. Available: {', '.join(node_defs.keys())}")
            sys.exit(2)
        node_defs = {args.node: node_defs[args.node]}
    # Collect configs from each node
    print(f"Collecting configs from {len(node_defs)} node(s)...", file=sys.stderr)
    nodes = []
    for name, info in node_defs.items():
        print(f" {name} ({info['host']})...", file=sys.stderr, end=" ", flush=True)
        node_config = ssh_collect(name, info, timeout=args.timeout)
        # NOTE(review): ssh_collect never sets reachable=False, so the
        # UNREACHABLE branch below appears to be dead — confirm.
        if node_config.reachable:
            print(f"OK ({len(node_config.configs)} files)", file=sys.stderr)
        else:
            print("UNREACHABLE", file=sys.stderr)
        nodes.append(node_config)
    # Detect drift
    print("\nAnalyzing drift...", file=sys.stderr)
    drifts = detect_drift(nodes, golden)
    # Output (JSON or human-readable, both on stdout)
    if args.json:
        print_json_report(drifts, nodes, golden)
    else:
        print()
        print_report(drifts, nodes, golden)
    # Auto-sync if requested
    if args.auto_sync and drifts:
        print("\n--- AUTO-SYNC ---")
        actions = auto_sync(drifts, nodes)
        for a in actions:
            print(a)
    # Exit code — note the first two branches both exit 1 (any drift is
    # reported as exit 1, critical or not), matching the module docstring.
    if any(d.severity == "critical" for d in drifts):
        sys.exit(1)
    elif drifts:
        sys.exit(1)
    else:
        sys.exit(0)

View File

@@ -1,4 +1,3 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision

176
scripts/pr_triage.py Executable file
View File

@@ -0,0 +1,176 @@
#!/usr/bin/env python3
"""PR Triage Automation -- Categorize, deduplicate, report (#659)."""
import argparse, json, os, re, sys, subprocess
from collections import Counter, defaultdict
from datetime import datetime
from urllib.request import Request, urlopen
from urllib.error import HTTPError
def _token():
t = os.environ.get("GITEA_TOKEN", "")
if not t:
p = os.path.expanduser("~/.config/gitea/token")
if os.path.exists(p):
t = open(p).read().strip()
return t
def _api(url, token, method="GET", data=None):
    """Call the Gitea REST API and return parsed JSON, or None on HTTP error.

    NOTE: a falsy ``data`` (e.g. {}) sends no request body — callers pass
    either a non-empty dict or None.
    """
    headers = {"Authorization": "token " + token, "Accept": "application/json"}
    payload = None
    if data:
        payload = json.dumps(data).encode()
        headers["Content-Type"] = "application/json"
    request = Request(url, data=payload, headers=headers, method=method)
    try:
        with urlopen(request, timeout=30) as resp:
            return json.loads(resp.read())
    except HTTPError:
        return None
def fetch_prs(base, token, owner, repo):
    """Fetch all open pull requests for owner/repo, following pagination."""
    collected = []
    page = 1
    while True:
        url = f"{base}/api/v1/repos/{owner}/{repo}/pulls?state=open&limit=50&page={page}"
        batch = _api(url, token)
        if not batch:
            return collected
        collected.extend(batch)
        # A short page means we just consumed the last one
        if len(batch) < 50:
            return collected
        page += 1
def fetch_issues(base, token, owner, repo):
    """Fetch all open issues (excluding PRs) keyed by issue number."""
    issues_by_number = {}
    page = 1
    while True:
        url = f"{base}/api/v1/repos/{owner}/{repo}/issues?state=open&limit=50&page={page}"
        batch = _api(url, token)
        if not batch:
            return issues_by_number
        for item in batch:
            # Gitea's /issues endpoint also returns PRs; skip anything
            # carrying a pull_request stub.
            if "pull_request" not in item:
                issues_by_number[item["number"]] = item
        # A short page means we just consumed the last one
        if len(batch) < 50:
            return issues_by_number
        page += 1
def categorize(pr):
    """Bucket a PR into a triage category by keyword-matching its text.

    Searches the lowercased concatenation of title, body, and label names;
    the first matching keyword wins, so rule order matters.
    """
    label_text = " ".join(lbl.get("name", "") for lbl in pr.get("labels", []))
    haystack = " ".join([pr.get("title", ""), pr.get("body", ""), label_text]).lower()
    rules = (
        ("training data", "training-data"), ("dpo", "training-data"), ("grpo", "training-data"),
        ("fix:", "bug-fix"), ("bug", "bug-fix"), ("hotfix", "bug-fix"),
        ("feat:", "feature"), ("feature", "feature"),
        ("refactor", "maintenance"), ("cleanup", "maintenance"),
        ("doc", "documentation"), ("test", "testing"), ("infra", "infrastructure"),
    )
    return next((category for keyword, category in rules if keyword in haystack), "other")
def refs(pr):
    """Return every issue number referenced as '#N' in the PR title or body."""
    text = "{} {}".format(pr.get("title", ""), pr.get("body", ""))
    return [int(number) for number in re.findall(r"#(\d+)", text)]
def find_duplicates(prs):
    """Group PRs referencing the same issue; return only groups with >1 PR."""
    grouped = {}
    for pr in prs:
        for issue_number in refs(pr):
            grouped.setdefault(issue_number, []).append(pr)
    return [group for group in grouped.values() if len(group) > 1]
def health(pr, issues):
    """Build a per-PR health record: category, issue refs, age, staleness.

    ``issues`` maps open issue numbers to issue payloads; referenced numbers
    absent from it are reported under "closed".
    """
    referenced = refs(pr)
    created = datetime.fromisoformat(pr["created_at"].replace("Z", "+00:00"))
    updated = datetime.fromisoformat(pr["updated_at"].replace("Z", "+00:00"))
    now = datetime.now(created.tzinfo)
    still_open = [num for num in referenced if num in issues]
    not_open = [num for num in referenced if num not in issues]
    return {
        "pr": pr["number"],
        "title": pr["title"],
        "head": pr["head"]["ref"],
        "category": categorize(pr),
        "refs": referenced,
        "open": still_open,
        "closed": not_open,
        "age": (now - created).days,
        "stale": (now - updated).days,
        "mergeable": pr.get("mergeable"),
        "author": pr.get("user", {}).get("login", ""),
    }
def report(repo, checks, dups):
    """Render triage results as a Markdown report string.

    Sections: summary table by category, duplicate groups (if any),
    stale PRs (if any), and a full table of all open PRs.
    """
    generated = datetime.now().strftime("%Y-%m-%d %H:%M")
    out = [f"# PR Triage -- {repo}", f"Generated: {generated}",
           f"Open PRs: {len(checks)}", "", "## Summary", ""]
    out += ["| Category | Count |", "|----------|-------|"]
    for category, count in Counter(h["category"] for h in checks).most_common():
        out.append(f"| {category} | {count} |")
    stale = [h for h in checks if h["stale"] > 7]
    out += ["", f"Stale (>7d): {len(stale)}", f"Duplicate groups: {len(dups)}", ""]
    if dups:
        out.append("## Duplicates")
        for group in dups:
            issue_numbers = set()
            for pr in group:
                issue_numbers.update(refs(pr))
            out.append("Issues " + ", ".join(f"#{n}" for n in sorted(issue_numbers)) + ":")
            for pr in group:
                out.append(f" - #{pr['number']}: {pr['title']}")
            out.append("")
    if stale:
        out.append("## Stale (>7d)")
        for h in sorted(stale, key=lambda entry: entry["stale"], reverse=True):
            out.append(f"- #{h['pr']}: {h['title']} -- {h['stale']}d")
        out.append("")
    out += ["## All PRs",
            "| # | Title | Category | Age | Stale | Merge |",
            "|---|-------|----------|-----|-------|-------|"]
    for h in sorted(checks, key=lambda entry: entry["pr"]):
        # mergeable can be True / False / None (unknown)
        merge_flag = "Y" if h["mergeable"] else ("N" if h["mergeable"] is False else "?")
        stale_cell = f"{h['stale']}d" if h["stale"] > 7 else "-"
        out.append(f"| {h['pr']} | {h['title'][:50]} | {h['category']} | {h['age']}d | {stale_cell} | {merge_flag} |")
    return "\n".join(out)
def main():
    """CLI entry point: fetch open PRs/issues, triage, and print a report.

    Repo resolution: --repo flag first, then the owner/repo parsed from the
    local git origin remote. Exits 1 when no token or repo is available.
    """
    p = argparse.ArgumentParser(description="PR Triage Automation")
    p.add_argument("--base-url", default="https://forge.alexanderwhitestone.com")
    p.add_argument("--owner", default="Timmy_Foundation")
    p.add_argument("--repo", default="")
    p.add_argument("--json", action="store_true", dest="js")
    p.add_argument("--output", default="")
    a = p.parse_args()
    token = _token()
    if not token:
        print("No token"); sys.exit(1)
    repo = a.repo
    if not repo:
        # Fall back to parsing owner/repo from the origin remote URL
        # (works for both HTTPS and SSH-style remotes; ".git" is optional).
        try:
            remote = subprocess.check_output(["git", "remote", "get-url", "origin"], text=True).strip()
            m = re.search(r"[/:](\w[\w-]*)/(\w[\w-]*?)(?:\.git)?$", remote)
            if m:
                a.owner, repo = m.group(1), m.group(2)
        except Exception:
            # Best effort — not inside a git repo or git missing; handled below
            pass
    if not repo:
        print("No repo specified"); sys.exit(1)
    print("Triaging " + a.owner + "/" + repo + "...", file=sys.stderr)
    prs = fetch_prs(a.base_url, token, a.owner, repo)
    issues = fetch_issues(a.base_url, token, a.owner, repo)
    checks = [health(pr, issues) for pr in prs]
    dups = find_duplicates(prs)
    if a.js:
        print(json.dumps({"repo": repo, "prs": checks,
                          "duplicates": [[{"number": p["number"], "title": p["title"]} for p in g] for g in dups]},
                         indent=2))
    else:
        r = report(repo, checks, dups)
        print(r)
        # --output writes the Markdown report only in non-JSON mode
        if a.output:
            with open(a.output, "w") as f:
                f.write(r)
    # One-line summary on stderr regardless of output mode
    print("\n" + str(len(checks)) + " PRs, " + str(len(dups)) + " duplicate groups", file=sys.stderr)

View File

@@ -1,4 +1,3 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision

45
tests/test_pr_triage.py Normal file
View File

@@ -0,0 +1,45 @@
"""Tests for PR triage automation (#659)."""
import pytest
class TestCategorize:
    """categorize() maps keyword hits in the PR text to triage buckets."""

    @staticmethod
    def _pr(title="", body=""):
        # Minimal PR payload — categorize() only reads title, body, labels.
        return {"title": title, "body": body, "labels": []}

    def test_training(self):
        from scripts.pr_triage import categorize
        pr = self._pr(title="Add DPO pairs")
        assert categorize(pr) == "training-data"

    def test_bug(self):
        from scripts.pr_triage import categorize
        pr = self._pr(title="fix: crash")
        assert categorize(pr) == "bug-fix"

    def test_feature(self):
        from scripts.pr_triage import categorize
        pr = self._pr(title="feat: dark mode")
        assert categorize(pr) == "feature"

    def test_other(self):
        from scripts.pr_triage import categorize
        pr = self._pr(title="random")
        assert categorize(pr) == "other"
class TestRefs:
    """refs() extracts '#N' issue references from a PR's title and body."""

    def test_simple(self):
        from scripts.pr_triage import refs
        found = refs({"title": "Fix #123", "body": ""})
        assert 123 in found

    def test_multiple(self):
        from scripts.pr_triage import refs
        found = refs({"title": "", "body": "Closes #100, Refs #200"})
        assert 100 in found
        assert 200 in found
class TestDuplicates:
    """find_duplicates() groups PRs that reference the same issue number."""

    def test_found(self):
        from scripts.pr_triage import find_duplicates
        # Two PRs both referencing issue #1 -> exactly one duplicate group.
        prs = [{"title": "", "body": "Fix #1", "number": 1, "head": {"ref": "a"}, "created_at": "2026-01-01T00:00:00Z", "updated_at": "2026-01-01T00:00:00Z", "user": {}},
               {"title": "", "body": "Refs #1", "number": 2, "head": {"ref": "b"}, "created_at": "2026-01-01T00:00:00Z", "updated_at": "2026-01-01T00:00:00Z", "user": {}}]
        assert len(find_duplicates(prs)) == 1

    def test_none(self):
        from scripts.pr_triage import find_duplicates
        # PRs referencing distinct issues -> no duplicate groups.
        prs = [{"title": "", "body": "Fix #1", "number": 1, "head": {"ref": "a"}, "created_at": "2026-01-01T00:00:00Z", "updated_at": "2026-01-01T00:00:00Z", "user": {}},
               {"title": "", "body": "Fix #2", "number": 2, "head": {"ref": "b"}, "created_at": "2026-01-01T00:00:00Z", "updated_at": "2026-01-01T00:00:00Z", "user": {}}]
        assert find_duplicates(prs) == []