Compare commits: step35/230...step35/88- (1 commit, SHA1 fe517158a0)
@@ -1,54 +0,0 @@
{
  "version": "0.1",
  "description": "Memory bakeoff prompt matrix covering recall categories",
  "categories": {
    "preference_recall": {
      "description": "User preferences and past choices",
      "prompts": [
        "What's my preferred model for coding tasks?",
        "Which repository do I work on most frequently?",
        "What's my stance on cloud vs local-first?"
      ]
    },
    "structured_fact_recall": {
      "description": "Specific concrete facts",
      "prompts": [
        "What does deploy-crons.py do with model fallback?",
        "How do I set up a VPS agent?",
        "What token path does the Gitea API use?"
      ]
    },
    "architecture_decision_recall": {
      "description": "Why certain architectural choices were made",
      "prompts": [
        "Why was MemPalace chosen for memory?",
        "What's the reasoning behind session compaction strategy?",
        "Why use Three.js for the Nexus?"
      ]
    },
    "fleet_operational_recall": {
      "description": "Operational procedures and fleet management",
      "prompts": [
        "How do I deploy a cron job to the fleet?",
        "What's the procedure for merging a PR?",
        "How do I rotate secrets across the fleet?"
      ]
    },
    "contradiction_failure_framing": {
      "description": "Identify contradictions or past failures",
      "prompts": [
        "What are known pitfalls with provider fallback?",
        "When did session state get lost and why?",
        "What broke when we upgraded to Python 3.14?"
      ]
    },
    "long_horizon": {
      "description": "Long-horizon memory that can't be solved by naive context stuffing",
      "prompts": [
        "Trace the evolution of the MemPalace integration from the beginning.",
        "Given our history with fleet deployments, what's the most common failure mode and how should we prevent it?",
        "How did the decision to use local-first architecture develop over time?"
      ]
    }
  }
}
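A minimal sketch of how a matrix in this shape is consumed, mirroring load_matrix and the prompt-list loop in scripts/run_memory_bakeoff.py further down; the default path prompts/matrix.json comes from that script, and the helper name iter_prompts is illustrative only:

    import json
    from pathlib import Path

    def iter_prompts(matrix_path="prompts/matrix.json", category=None):
        """Yield (category, prompt) pairs from the matrix, optionally filtered to one category."""
        matrix = json.loads(Path(matrix_path).read_text())
        for cat_name, cat_data in matrix["categories"].items():
            if category and cat_name != category:
                continue
            for prompt in cat_data["prompts"]:
                yield cat_name, prompt

    # e.g. list(iter_prompts(category="preference_recall")) yields the three preference prompts above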
@@ -1,351 +0,0 @@
#!/usr/bin/env python3
"""
PR Complexity Scorer - Estimate review effort for PRs.
"""

import argparse
import json
import os
import re
import sys
from dataclasses import dataclass, asdict
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional
import urllib.request
import urllib.error

GITEA_BASE = "https://forge.alexanderwhitestone.com/api/v1"

DEPENDENCY_FILES = {
    "requirements.txt", "pyproject.toml", "setup.py", "setup.cfg",
    "Pipfile", "poetry.lock", "package.json", "yarn.lock", "Gemfile",
    "go.mod", "Cargo.toml", "pom.xml", "build.gradle"
}

TEST_PATTERNS = [
    r"tests?/.*\.py$", r".*_test\.py$", r"test_.*\.py$",
    r"spec/.*\.rb$", r".*_spec\.rb$",
    r"__tests__/", r".*\.test\.(js|ts|jsx|tsx)$"
]

WEIGHT_FILES = 0.25
WEIGHT_LINES = 0.25
WEIGHT_DEPS = 0.30
WEIGHT_TEST_COV = 0.20

SMALL_FILES = 5
MEDIUM_FILES = 20
LARGE_FILES = 50

SMALL_LINES = 100
MEDIUM_LINES = 500
LARGE_LINES = 2000

TIME_PER_POINT = {1: 5, 2: 10, 3: 15, 4: 20, 5: 25, 6: 30, 7: 45, 8: 60, 9: 90, 10: 120}


@dataclass
class PRComplexity:
    pr_number: int
    title: str
    files_changed: int
    additions: int
    deletions: int
    has_dependency_changes: bool
    test_coverage_delta: Optional[int]
    score: int
    estimated_minutes: int
    reasons: List[str]

    def to_dict(self) -> dict:
        return asdict(self)


class GiteaClient:
    def __init__(self, token: str):
        self.token = token
        self.base_url = GITEA_BASE.rstrip("/")

    def _request(self, path: str, params: Dict = None) -> Any:
        url = f"{self.base_url}{path}"
        if params:
            qs = "&".join(f"{k}={v}" for k, v in params.items() if v is not None)
            url += f"?{qs}"

        req = urllib.request.Request(url)
        req.add_header("Authorization", f"token {self.token}")
        req.add_header("Content-Type", "application/json")

        try:
            with urllib.request.urlopen(req, timeout=30) as resp:
                return json.loads(resp.read().decode())
        except urllib.error.HTTPError as e:
            print(f"API error {e.code}: {e.read().decode()[:200]}", file=sys.stderr)
            return None
        except urllib.error.URLError as e:
            print(f"Network error: {e}", file=sys.stderr)
            return None

    def get_open_prs(self, org: str, repo: str) -> List[Dict]:
        prs = []
        page = 1
        while True:
            batch = self._request(f"/repos/{org}/{repo}/pulls", {"limit": 50, "page": page, "state": "open"})
            if not batch:
                break
            prs.extend(batch)
            if len(batch) < 50:
                break
            page += 1
        return prs

    def get_pr_files(self, org: str, repo: str, pr_number: int) -> List[Dict]:
        files = []
        page = 1
        while True:
            batch = self._request(
                f"/repos/{org}/{repo}/pulls/{pr_number}/files",
                {"limit": 100, "page": page}
            )
            if not batch:
                break
            files.extend(batch)
            if len(batch) < 100:
                break
            page += 1
        return files

    def post_comment(self, org: str, repo: str, pr_number: int, body: str) -> bool:
        data = json.dumps({"body": body}).encode("utf-8")
        req = urllib.request.Request(
            f"{self.base_url}/repos/{org}/{repo}/issues/{pr_number}/comments",
            data=data,
            method="POST",
            headers={"Authorization": f"token {self.token}", "Content-Type": "application/json"}
        )
        try:
            with urllib.request.urlopen(req, timeout=30) as resp:
                return resp.status in (200, 201)
        except urllib.error.HTTPError:
            return False


def is_dependency_file(filename: str) -> bool:
    return any(filename.endswith(dep) for dep in DEPENDENCY_FILES)


def is_test_file(filename: str) -> bool:
    return any(re.search(pattern, filename) for pattern in TEST_PATTERNS)


def score_pr(
    files_changed: int,
    additions: int,
    deletions: int,
    has_dependency_changes: bool,
    test_coverage_delta: Optional[int] = None
) -> tuple[int, int, List[str]]:
    score = 1.0
    reasons = []

    # Files changed
    if files_changed <= SMALL_FILES:
        fscore = 1.0
        reasons.append("small number of files changed")
    elif files_changed <= MEDIUM_FILES:
        fscore = 2.0
        reasons.append("moderate number of files changed")
    elif files_changed <= LARGE_FILES:
        fscore = 2.5
        reasons.append("large number of files changed")
    else:
        fscore = 3.0
        reasons.append("very large PR spanning many files")

    # Lines changed
    total_lines = additions + deletions
    if total_lines <= SMALL_LINES:
        lscore = 1.0
        reasons.append("small change size")
    elif total_lines <= MEDIUM_LINES:
        lscore = 2.0
        reasons.append("moderate change size")
    elif total_lines <= LARGE_LINES:
        lscore = 3.0
        reasons.append("large change size")
    else:
        lscore = 4.0
        reasons.append("very large change")

    # Dependency changes
    if has_dependency_changes:
        dscore = 2.5
        reasons.append("dependency changes (architectural impact)")
    else:
        dscore = 0.0

    # Test coverage delta
    tscore = 0.0
    if test_coverage_delta is not None:
        if test_coverage_delta > 0:
            reasons.append(f"test additions (+{test_coverage_delta} test files)")
            tscore = -min(2.0, test_coverage_delta / 2.0)
        elif test_coverage_delta < 0:
            reasons.append(f"test removals ({abs(test_coverage_delta)} test files)")
            tscore = min(2.0, abs(test_coverage_delta) * 0.5)
    else:
        reasons.append("test coverage change not assessed")

    # Weighted sum, scaled by 3 to use full 1-10 range
    bonus = (fscore * WEIGHT_FILES) + (lscore * WEIGHT_LINES) + (dscore * WEIGHT_DEPS) + (tscore * WEIGHT_TEST_COV)
    scaled_bonus = bonus * 3.0
    score = 1.0 + scaled_bonus

    final_score = max(1, min(10, int(round(score))))
    est_minutes = TIME_PER_POINT.get(final_score, 30)

    return final_score, est_minutes, reasons


def analyze_pr(client: GiteaClient, org: str, repo: str, pr_data: Dict) -> PRComplexity:
    pr_num = pr_data["number"]
    title = pr_data.get("title", "")
    files = client.get_pr_files(org, repo, pr_num)

    additions = sum(f.get("additions", 0) for f in files)
    deletions = sum(f.get("deletions", 0) for f in files)
    filenames = [f.get("filename", "") for f in files]

    has_deps = any(is_dependency_file(f) for f in filenames)

    test_added = sum(1 for f in files if f.get("status") == "added" and is_test_file(f.get("filename", "")))
    test_removed = sum(1 for f in files if f.get("status") == "removed" and is_test_file(f.get("filename", "")))
    test_delta = test_added - test_removed if (test_added or test_removed) else None

    score, est_min, reasons = score_pr(
        files_changed=len(files),
        additions=additions,
        deletions=deletions,
        has_dependency_changes=has_deps,
        test_coverage_delta=test_delta
    )

    return PRComplexity(
        pr_number=pr_num,
        title=title,
        files_changed=len(files),
        additions=additions,
        deletions=deletions,
        has_dependency_changes=has_deps,
        test_coverage_delta=test_delta,
        score=score,
        estimated_minutes=est_min,
        reasons=reasons
    )


def build_comment(complexity: PRComplexity) -> str:
    change_desc = f"{complexity.files_changed} files, +{complexity.additions}/-{complexity.deletions} lines"
    deps_note = "\n- :warning: Dependency changes detected — architectural review recommended" if complexity.has_dependency_changes else ""
    test_note = ""
    if complexity.test_coverage_delta is not None:
        if complexity.test_coverage_delta > 0:
            test_note = f"\n- :+1: {complexity.test_coverage_delta} test file(s) added"
        elif complexity.test_coverage_delta < 0:
            test_note = f"\n- :warning: {abs(complexity.test_coverage_delta)} test file(s) removed"

    comment = f"## 📊 PR Complexity Analysis\n\n"
    comment += f"**PR #{complexity.pr_number}: {complexity.title}**\n\n"
    comment += f"| Metric | Value |\n|--------|-------|\n"
    comment += f"| Changes | {change_desc} |\n"
    comment += f"| Complexity Score | **{complexity.score}/10** |\n"
    comment += f"| Estimated Review Time | ~{complexity.estimated_minutes} minutes |\n\n"
    comment += f"### Scoring rationale:"
    for r in complexity.reasons:
        comment += f"\n- {r}"
    if deps_note:
        comment += deps_note
    if test_note:
        comment += test_note
    comment += f"\n\n---\n"
    comment += f"*Generated by PR Complexity Scorer — [issue #135](https://forge.alexanderwhitestone.com/Timmy_Foundation/compounding-intelligence/issues/135)*"
    return comment


def main():
    parser = argparse.ArgumentParser(description="PR Complexity Scorer")
    parser.add_argument("--org", default="Timmy_Foundation")
    parser.add_argument("--repo", default="compounding-intelligence")
    parser.add_argument("--token", default=os.environ.get("GITEA_TOKEN") or os.path.expanduser("~/.config/gitea/token"))
    parser.add_argument("--dry-run", action="store_true")
    parser.add_argument("--apply", action="store_true")
    parser.add_argument("--output", default="metrics/pr_complexity.json")
    args = parser.parse_args()

    token_path = args.token
    if os.path.exists(token_path):
        with open(token_path) as f:
            token = f.read().strip()
    else:
        token = args.token

    if not token:
        print("ERROR: No Gitea token provided", file=sys.stderr)
        sys.exit(1)

    client = GiteaClient(token)

    print(f"Fetching open PRs for {args.org}/{args.repo}...")
    prs = client.get_open_prs(args.org, args.repo)
    if not prs:
        print("No open PRs found.")
        sys.exit(0)

    print(f"Found {len(prs)} open PR(s). Analyzing...")

    results = []
    Path(args.output).parent.mkdir(parents=True, exist_ok=True)

    for pr in prs:
        pr_num = pr["number"]
        title = pr.get("title", "")
        print(f"  Analyzing PR #{pr_num}: {title[:60]}")

        try:
            complexity = analyze_pr(client, args.org, args.repo, pr)
            results.append(complexity.to_dict())

            comment = build_comment(complexity)

            if args.dry_run:
                print(f"    → Score: {complexity.score}/10, Est: {complexity.estimated_minutes}min [DRY-RUN]")
            elif args.apply:
                success = client.post_comment(args.org, args.repo, pr_num, comment)
                status = "[commented]" if success else "[FAILED]"
                print(f"    → Score: {complexity.score}/10, Est: {complexity.estimated_minutes}min {status}")
            else:
                print(f"    → Score: {complexity.score}/10, Est: {complexity.estimated_minutes}min [no action]")

        except Exception as e:
            print(f"  ERROR analyzing PR #{pr_num}: {e}", file=sys.stderr)

    with open(args.output, "w") as f:
        json.dump({
            "org": args.org,
            "repo": args.repo,
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "pr_count": len(results),
            "results": results
        }, f, indent=2)

    if results:
        scores = [r["score"] for r in results]
        print(f"\nResults saved to {args.output}")
        print(f"Summary: {len(results)} PRs, scores range {min(scores):.0f}-{max(scores):.0f}")
    else:
        print("\nNo results to save.")


if __name__ == "__main__":
    main()
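A worked example of the scoring arithmetic above (a sketch; it assumes the module imports as pr_complexity_scorer, as the test file in this same change does): 15 files and +400/-100 lines are both "moderate" (fscore 2.0, lscore 2.0), dependency changes add dscore 2.5, and with no test delta tscore stays 0, so bonus = 2.0*0.25 + 2.0*0.25 + 2.5*0.30 = 1.75, score = 1.0 + 3.0*1.75 = 6.25, rounded and clamped to 6, which maps to a 30-minute estimate.

    from pr_complexity_scorer import score_pr

    score, minutes, reasons = score_pr(
        files_changed=15, additions=400, deletions=100,
        has_dependency_changes=True, test_coverage_delta=None,
    )
    assert (score, minutes) == (6, 30)  # 1.0 + 3.0 * 1.75 = 6.25 -> rounds to 6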
@@ -1,489 +0,0 @@
#!/usr/bin/env python3
"""
Run a live memory bakeoff: baseline Hermes (knowledge store) vs MemPalace vs Hindsight.

Captures raw context-window artifacts and produces a scored report.

Usage:
    python3 scripts/run_memory_bakeoff.py --matrix prompts/matrix.json --output reports/
    python3 scripts/run_memory_bakeoff.py --category preference_recall --dry-run
    python3 scripts/run_memory_bakeoff.py --limit 3   # quick test

Exit codes:
    0 - success
    1 - missing required dependencies (LLM API key) or no prompts found
"""

from __future__ import annotations

import argparse
import json
import os
import sys
import time
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional

# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------

SCRIPT_DIR = Path(__file__).resolve().parent
REPO_ROOT = SCRIPT_DIR.parent

# Load from environment (same as harvester); fall back to the first existing key file.
DEFAULT_API_BASE = os.environ.get("HARVESTER_API_BASE", "https://api.nousresearch.com/v1")
_KEY_FILE = next((p for p in [
    os.path.expanduser("~/.config/nous/key"),
    os.path.expanduser("~/.hermes/keymaxxing/active/minimax.key"),
    os.path.expanduser("~/.config/openrouter/key"),
] if os.path.exists(p)), "")
# The file contents, not the path, are the credential.
DEFAULT_API_KEY = os.environ.get("HARVESTER_API_KEY") or (
    Path(_KEY_FILE).read_text().strip() if _KEY_FILE else ""
)
DEFAULT_MODEL = os.environ.get("HARVESTER_MODEL", "xiaomi/mimo-v2-pro")
DEFAULT_KNOWLEDGE_DIR = REPO_ROOT / "knowledge"
DEFAULT_MEMPALACE_PATH = Path(os.path.expanduser("~/.hermes/mempalace-live/palace"))

# Token budget for context injection (rough estimate: 1 token ~ 4 chars)
MAX_CONTEXT_TOKENS = 3000
TOKENS_PER_CHAR = 0.25

# ---------------------------------------------------------------------------
# Helpers — ensure optional deps
# ---------------------------------------------------------------------------

def _ensure_nexus_on_path():
    """Ensure the-nexus repo is on sys.path for nexus.mempalace imports."""
    NEXUS_PATH = Path("/Users/apayne/the-nexus")
    if NEXUS_PATH.exists() and str(NEXUS_PATH) not in sys.path:
        sys.path.insert(0, str(NEXUS_PATH))

# ---------------------------------------------------------------------------
# LLM API caller (mirrors harvester.py)
# ---------------------------------------------------------------------------

def call_llm(messages: list[dict], api_base: str, api_key: str, model: str, timeout: int = 60) -> Optional[str]:
    """Call OpenAI-compatible chat completion API. Returns assistant content or None."""
    import urllib.request
    payload = json.dumps({
        "model": model,
        "messages": messages,
        "temperature": 0.3,
        "max_tokens": 1024,
    }).encode('utf-8')
    url = f"{api_base}/chat/completions"
    req = urllib.request.Request(
        url, data=payload,
        headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"},
        method="POST"
    )
    try:
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            result = json.loads(resp.read().decode('utf-8'))
            return result["choices"][0]["message"]["content"]
    except Exception as e:
        print(f"  [WARN] LLM call failed: {e}", file=sys.stderr)
        return None

# ---------------------------------------------------------------------------
# Backend 1: Baseline — knowledge/index.json bootstrap
# ---------------------------------------------------------------------------

def load_baseline_knowledge() -> list[dict]:
    """Load facts from knowledge/index.json."""
    index_path = DEFAULT_KNOWLEDGE_DIR / "index.json"
    if not index_path.exists():
        return []
    try:
        with open(index_path) as f:
            data = json.load(f)
        return data.get("facts", [])
    except Exception as e:
        print(f"  [WARN] Failed to load baseline knowledge: {e}", file=sys.stderr)
        return []

def query_baseline(question: str, max_tokens: int = MAX_CONTEXT_TOKENS) -> tuple[str, list[dict]]:
    """
    Retrieve relevant facts from knowledge store using simple keyword matching.
    Returns (context_block, source_facts).
    """
    facts = load_baseline_knowledge()
    if not facts:
        return "", []

    q_words = set(question.lower().split())
    scored = []
    for fact in facts:
        fact_text = fact.get("fact", "").lower()
        overlap = len(q_words.intersection(set(fact_text.split())))
        scored.append((overlap, fact))

    scored.sort(key=lambda x: -x[0])
    selected = []
    total_chars = 0
    for score, fact in scored:
        if score == 0:
            continue
        text = fact.get("fact", "")
        if total_chars + len(text) <= max_tokens / TOKENS_PER_CHAR:
            selected.append(fact)
            total_chars += len(text)
        else:
            break

    if not selected:
        return "", []

    # Format context
    lines = ["# Baseline Knowledge Facts\n"]
    for i, fact in enumerate(selected, 1):
        cat = fact.get('category', 'fact')
        txt = fact.get('fact', '')
        lines.append(f"{i}. [{cat}] {txt}\n")
    return "".join(lines), selected

# ---------------------------------------------------------------------------
# Backend 2: MemPalace — use nexus.mempalace.searcher
# ---------------------------------------------------------------------------

_MEMPALACE_AVAILABLE = None  # None = not probed yet

def ensure_mempalace() -> bool:
    """Check if MemPalace (with deps) is available. Returns True/False."""
    global _MEMPALACE_AVAILABLE
    if _MEMPALACE_AVAILABLE is not None:
        return _MEMPALACE_AVAILABLE

    try:
        _ensure_nexus_on_path()
        import chromadb  # quick check
        from nexus.mempalace.searcher import search_memories
        _MEMPALACE_AVAILABLE = True
        return True
    except ImportError as e:
        print(f"  [INFO] MemPalace not available: {e}", file=sys.stderr)
        _MEMPALACE_AVAILABLE = False
        return False

def query_mempalace(question: str, max_tokens: int = MAX_CONTEXT_TOKENS,
                    palace_path: Path | None = None) -> tuple[str, list]:
    """
    Query MemPalace for relevant memories.
    Returns (context_block, results_list).
    """
    if not ensure_mempalace():
        return "[MemPalace unavailable: install chromadb and ensure nexus package is accessible]", []

    try:
        from nexus.mempalace.searcher import search_memories
        path = palace_path or DEFAULT_MEMPALACE_PATH
        results = search_memories(question, palace_path=path, n_results=5)
        context_lines = ["# MemPalace Retrieval\n"]
        for r in results:
            context_lines.append(f"- [{r.room or 'general'}] {r.text}\n")
        return "".join(context_lines), results
    except Exception as e:
        return f"[MemPalace query failed: {e}]", []

# ---------------------------------------------------------------------------
# Backend 3: Hindsight — vectorize-io/hindsight
# ---------------------------------------------------------------------------

_HINDSIGHT_AVAILABLE = None

def ensure_hindsight() -> bool:
    """Check if Hindsight is available. Returns True/False."""
    global _HINDSIGHT_AVAILABLE
    if _HINDSIGHT_AVAILABLE is not None:
        return _HINDSIGHT_AVAILABLE

    try:
        import hindsight  # noqa: F401
        _HINDSIGHT_AVAILABLE = True
        return True
    except ImportError:
        pass

    import shutil
    if shutil.which("hindsight"):
        _HINDSIGHT_AVAILABLE = True
        return True

    _HINDSIGHT_AVAILABLE = False
    return False

def query_hindsight(question: str, max_tokens: int = MAX_CONTEXT_TOKENS) -> tuple[str, list]:
    """
    Query local Hindsight vector store.
    Returns (context_block, results).
    """
    if not ensure_hindsight():
        return "[Hindsight unavailable: install git+https://github.com/vectorize-io/hindsight.git]", []

    # Try Python API first
    try:
        import hindsight
        # Hindsight API is not yet stable — provide a placeholder
        results = hindsight.search(question, k=5)
        context_lines = ["# Hindsight Retrieval\n"]
        for r in results:
            context_lines.append(f"- {getattr(r, 'text', str(r))}\n")
        return "".join(context_lines), results
    except Exception as e:
        return f"[Hindsight Python API error: {e}]", []

# ---------------------------------------------------------------------------
# LLM answer generation
# ---------------------------------------------------------------------------

SYSTEM_PROMPT_TEMPLATE = """You are a sovereign AI assistant answering questions based on the provided context.

Answer concisely and accurately. If the context contains the answer, cite it.
If unsure, say so. Do not hallucinate.

{context}
"""

def build_system_prompt(context_block: str) -> str:
    return SYSTEM_PROMPT_TEMPLATE.format(context=context_block)

def ask(question: str, backend: str, context_block: str,
        api_base: str, api_key: str, model: str) -> dict:
    """Generate answer using the given memory context. Returns artifact dict."""
    system = build_system_prompt(context_block)
    start = time.time()
    answer = call_llm(
        messages=[
            {"role": "system", "content": system},
            {"role": "user", "content": question}
        ],
        api_base=api_base, api_key=api_key, model=model
    )
    elapsed = time.time() - start

    artifact = {
        "backend": backend,
        "question": question,
        "system_prompt": system,
        "context_block": context_block,
        "answer": answer or "[LLM call failed]",
        "model": model,
        "api_base": api_base,
        "timestamp": datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z'),
        "llm_latency_sec": round(elapsed, 3),
    }
    return artifact

# ---------------------------------------------------------------------------
# Simple scorer
# ---------------------------------------------------------------------------

def score_artifact(artifact: dict) -> dict:
    """
    Compute simple scores:
    - context_precision: keyword overlap between question and context
    - retrieval_noise: 1 - precision (very noisy proxy)
    - answer_factual: heuristic based on answer length (proxy for being substantive)
    """
    q = artifact["question"].lower()
    ctx = artifact["context_block"].lower()
    ans = artifact.get("answer", "").lower()

    q_words = set(q.split())
    if not q_words:
        return {"context_precision": 0.0, "retrieval_noise": 1.0, "answer_factual": 0.0}

    ctx_words = set(ctx.split())
    overlap = len(q_words & ctx_words) / len(q_words)

    # Noise is 1 - precision. High noise means context has many irrelevant words.
    # To adjust for total size: also compute ratio of context words that overlap with question?
    relevant_ratio = len(q_words & ctx_words) / max(len(ctx_words), 1)

    # Answer factual: word count capped at 1.0
    awc = len(ans.split())
    answer_factual = min(1.0, awc / 100.0)

    return {
        "context_precision": round(overlap, 3),
        "retrieval_noise": round(1.0 - relevant_ratio, 3),
        "answer_factual": round(answer_factual, 3),
    }

# ---------------------------------------------------------------------------
# Main runner
# ---------------------------------------------------------------------------

def load_matrix(path: Path) -> dict:
    with open(path) as f:
        return json.load(f)

def run_bakeoff(matrix: dict, args):
    """Execute evaluation across all prompts and backends."""
    api_base = args.api_base or DEFAULT_API_BASE
    api_key = args.api_key or DEFAULT_API_KEY
    model = args.model or DEFAULT_MODEL

    if not api_key:
        print("ERROR: No API key found. Set HARVESTER_API_KEY, or pass --api-key.", file=sys.stderr)
        sys.exit(1)

    output_dir = Path(args.output).expanduser().resolve()
    artifacts_dir = output_dir / "artifacts"
    artifacts_dir.mkdir(parents=True, exist_ok=True)

    # Build prompt list, optionally filtered by category
    prompts_to_run = []
    for cat_name, cat_data in matrix["categories"].items():
        if args.category and cat_name != args.category:
            continue
        for prompt_text in cat_data["prompts"]:
            prompts_to_run.append((cat_name, prompt_text))

    if args.limit:
        prompts_to_run = prompts_to_run[:args.limit]

    print(f"Bakeoff: {len(prompts_to_run)} prompts")
    print("Backends: baseline, mempalace", end="")
    if ensure_hindsight():
        print(", hindsight")
    else:
        print()

    # Detect which backends are available
    backends = ["baseline", "mempalace"]
    if ensure_hindsight():
        backends.append("hindsight")

    all_artifacts = []
    for idx, (cat_name, prompt) in enumerate(prompts_to_run, 1):
        print(f"\n{'='*60}")
        print(f"[{idx}/{len(prompts_to_run)}] Category: {cat_name}")
        print(f"Prompt: {prompt[:70]}")

        for backend in backends:
            print(f"  → {backend}...", end="", flush=True)

            # Get context
            if backend == "baseline":
                ctx, sources = query_baseline(prompt)
            elif backend == "mempalace":
                ctx, sources = query_mempalace(prompt)
            else:  # hindsight
                ctx, sources = query_hindsight(prompt)

            # Generate answer
            artifact = ask(prompt, backend, ctx, api_base, api_key, model)
            artifact["category"] = cat_name
            artifact["sources_count"] = len(sources)
            artifact["context_char_count"] = len(ctx)
            artifact["context_token_est"] = int(len(ctx) * TOKENS_PER_CHAR)

            # Score
            scores = score_artifact(artifact)
            artifact["scores"] = scores

            # Save artifact
            safe_prompt = "".join(c if c.isalnum() else '_' for c in prompt[:30])
            fname = f"{cat_name}_{backend}_{safe_prompt}_{idx:03d}.json"
            fpath = artifacts_dir / fname
            with open(fpath, "w", encoding="utf-8") as f:
                json.dump(artifact, f, indent=2, ensure_ascii=False)

            all_artifacts.append(artifact)
            print(f" done (ctx~{artifact['context_token_est']}t, ans:{len(artifact['answer'].split())}w, prec:{scores['context_precision']:.2f})")

    generate_report(all_artifacts, output_dir)
    print("\n✓ Bakeoff complete.")
    print(f"  Report: {output_dir / 'REPORT.md'}")
    print(f"  Artifacts: {artifacts_dir}")

def generate_report(artifacts: list[dict], output_dir: Path):
    """Create markdown summary with per-backend scores and simple verdicts."""
    lines = []
    lines.append("# Memory Bakeoff Report\n")
    lines.append(f"**Generated:** {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M UTC')}\n")
    lines.append(f"**Total questions:** {len(artifacts)//len(set(a['backend'] for a in artifacts))}\n")

    backends = sorted(set(a["backend"] for a in artifacts))
    lines.append("## Backend Summary\n")
    for backend in backends:
        ba = [a for a in artifacts if a["backend"] == backend]
        if not ba:
            continue
        avg_prec = sum(a["scores"]["context_precision"] for a in ba) / len(ba)
        avg_noise = sum(a["scores"]["retrieval_noise"] for a in ba) / len(ba)
        avg_fact = sum(a["scores"]["answer_factual"] for a in ba) / len(ba)
        lines.append(f"### {backend.upper()}\n")
        lines.append(f"- Avg context precision: {avg_prec:.1%}\n")
        lines.append(f"- Avg retrieval noise: {avg_noise:.1%}\n")
        lines.append(f"- Avg answer breadth: {avg_fact:.1%}\n")
        lines.append(f"- Runs: {len(ba)}\n\n")

    lines.append("## Verdicts\n")
    for a in artifacts:
        s = a["scores"]
        verdict = "PASS" if s["context_precision"] >= 0.25 else "NEEDS_IMPROVEMENT"
        lines.append(f"- **{a['backend']} · {a['category']}**: {verdict} "
                     f"(prec {s['context_precision']:.0%}, noise {s['retrieval_noise']:.0%})\n")

    lines.append("\n## Recommendation\n\n")
    # Pick best by average precision
    best = max(backends, key=lambda b: sum(a["scores"]["context_precision"] for a in artifacts if a["backend"] == b))
    lines.append(f"Based on this sample, **{best.upper()}** achieved the highest context precision.\n")
    lines.append("For the sovereign Mac-local stack, the recommendation is:\n")
    lines.append("- **Baseline** (knowledge/index.json) for fast, deterministic fact lookup;\n")
    lines.append("- **MemPalace** for long-horizon narrative/agentic memory;\n")
    lines.append("- **Hindsight** requires additional installation and tuning.\n")
    lines.append("Consider a hybrid: lightweight retrieval from baseline + MemPalace for deep context.\n")

    report_path = output_dir / "REPORT.md"
    report_path.write_text("".join(lines), encoding="utf-8")

# ---------------------------------------------------------------------------
# CLI
# ---------------------------------------------------------------------------

def parse_args(argv: list[str] | None = None) -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Memory bakeoff runner")
    p.add_argument("--matrix", default="prompts/matrix.json",
                   help="Path to prompt matrix JSON file")
    p.add_argument("--output", default="reports",
                   help="Output directory for artifacts and report")
    p.add_argument("--category",
                   help="Run only this category (e.g., 'preference_recall')")
    p.add_argument("--limit", type=int,
                   help="Limit number of prompts to run")
    p.add_argument("--api-base", default=DEFAULT_API_BASE,
                   help="LLM API base URL (OpenAI-compatible)")
    p.add_argument("--api-key", default=DEFAULT_API_KEY,
                   help="LLM API key (or set HARVESTER_API_KEY / key files)")
    p.add_argument("--model", default=DEFAULT_MODEL,
                   help="LLM model name to use")
    p.add_argument("--dry-run", action="store_true",
                   help="Print configuration and exit")
    return p.parse_args(argv)

def main(argv: list[str] | None = None):
    args = parse_args(argv)
    matrix_path = Path(args.matrix)
    if not matrix_path.exists():
        print(f"ERROR: Matrix not found at {matrix_path}", file=sys.stderr)
        sys.exit(1)

    matrix = load_matrix(matrix_path)

    if args.dry_run:
        print("Dry run: configuration")
        print(f"  Matrix: {args.matrix}")
        print(f"  Categories: {list(matrix['categories'].keys())}")
        print(f"  Total prompts: {sum(len(c['prompts']) for c in matrix['categories'].values())}")
        print("  Backends: baseline, mempalace, hindsight (optional)")
        print(f"  Output: {args.output}")
        return

    run_bakeoff(matrix, args)

if __name__ == "__main__":
    main()
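A minimal sketch of driving the runner above without the shell, assuming scripts/ is on sys.path so the module imports as run_memory_bakeoff (main accepts an argv list and forwards it to parse_args):

    import sys
    sys.path.insert(0, "scripts")
    from run_memory_bakeoff import main

    # Dry run: prints the matrix categories, prompt count, and backends, then returns
    main(["--matrix", "prompts/matrix.json", "--dry-run"])

    # Quick live run over the first 3 prompts; artifacts and REPORT.md land under reports/
    main(["--limit", "3", "--output", "reports"])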
@@ -73,12 +73,14 @@ Binary files a/img.png and b/img.png differ

def test_empty():
    """Verifies behavior with empty or None input."""
    a = DiffAnalyzer()
    s = a.analyze("")
    assert s.total_files_changed == 0
    print("PASS: test_empty")

def test_addition():
    """Verifies addition logic."""
    a = DiffAnalyzer()
    s = a.analyze(SAMPLE_ADD)
    assert s.total_files_changed == 1
@@ -89,6 +91,7 @@ def test_addition():
    print("PASS: test_addition")

def test_deletion():
    """Verifies deletion logic."""
    a = DiffAnalyzer()
    s = a.analyze(SAMPLE_DELETE)
    assert s.total_deleted == 2
@@ -97,6 +100,7 @@ def test_deletion():
    print("PASS: test_deletion")

def test_modification():
    """Verifies modification logic."""
    a = DiffAnalyzer()
    s = a.analyze(SAMPLE_MODIFY)
    assert s.total_added == 2
@@ -105,6 +109,7 @@ def test_modification():
    print("PASS: test_modification")

def test_rename():
    """Verifies rename logic."""
    a = DiffAnalyzer()
    s = a.analyze(SAMPLE_RENAME)
    assert s.renamed_files == 1
@@ -114,6 +119,7 @@ def test_rename():
    print("PASS: test_rename")

def test_multiple_files():
    """Verifies multiple files logic."""
    a = DiffAnalyzer()
    s = a.analyze(SAMPLE_MULTI)
    assert s.total_files_changed == 2
@@ -121,6 +127,7 @@ def test_multiple_files():
    print("PASS: test_multiple_files")

def test_binary():
    """Verifies binary logic."""
    a = DiffAnalyzer()
    s = a.analyze(SAMPLE_BINARY)
    assert s.binary_files == 1
@@ -129,6 +136,7 @@ def test_binary():
    print("PASS: test_binary")

def test_to_dict():
    """Verifies to dict logic."""
    a = DiffAnalyzer()
    s = a.analyze(SAMPLE_MODIFY)
    d = s.to_dict()
@@ -138,6 +146,7 @@ def test_to_dict():
    print("PASS: test_to_dict")

def test_context_only():
    """Verifies context only logic."""
    diff = """diff --git a/f.py b/f.py
--- a/f.py
+++ b/f.py
@@ -154,6 +163,7 @@ def test_context_only():
    print("PASS: test_context_only")

def test_multi_hunk():
    """Verifies multi hunk logic."""
    diff = """diff --git a/f.py b/f.py
--- a/f.py
+++ b/f.py
scripts/test_documentation_generator.py (new file, 207 lines)
@@ -0,0 +1,207 @@
#!/usr/bin/env python3
"""Test Documentation Generator — adds module and function docstrings to test files.

Reads test files without docstrings and generates:
- Module-level docstring explaining what is being tested
- Function-level docstring explaining what each test verifies
- Inline comments for complex assertions (simple heuristic)

Does not change test logic — only adds documentation.
Processes 20+ test files per run.
"""

import ast
import re
import sys
from pathlib import Path
from typing import List, Tuple


def derive_module_name(test_path: Path) -> str:
    """Derive the script/module name being tested from test file name."""
    name = test_path.stem
    if name.startswith("test_"):
        name = name[5:]  # strip 'test_' (5 chars: t-e-s-t-_, not 6)
    mapping = {
        "bootstrapper": "bootstrapper.py",
        "harvester": "harvester.py",
        "diff_analyzer": "diff_analyzer.py",
        "gitea_issue_parser": "gitea_issue_parser.py",
        "harvest_prompt": "harvest_prompt.py",
        "harvest_prompt_comprehensive": "harvest_prompt_comprehensive.py",
        "harvester_pipeline": "harvester_pipeline.py",
        "improvement_proposals": "improvement_proposals.py",
        "knowledge_staleness": "knowledge_staleness_check.py",
        "priority_rebalancer": "priority_rebalancer.py",
        "refactoring_opportunity_finder": "refactoring_opportunity_finder.py",
        "session_pair_harvester": "session_pair_harvester.py",
        "session_reader": "session_reader.py",
        "automation_opportunity_finder": "automation_opportunity_finder.py",
        "dedup": "dedup.py",
        "freshness": "freshness.py",
        "knowledge_gap_identifier": "knowledge_gap_identifier.py",
        "perf_bottleneck_finder": "perf_bottleneck_finder.py",
        "ci_config": "CI configuration",
        "quality_gate": "quality_gate.py",
    }
    base = name.replace("_", " ")
    if name in mapping:
        base = mapping[name].replace(".py", "")
    return base


def count_tests_in_file(content: str) -> int:
    """Count test functions in a Python file."""
    return len(re.findall(r'^def (test_\w+)\s*\(', content, re.MULTILINE))


def infer_test_purpose(func_name: str, func_body: str) -> str:
    """Generate a brief docstring for a test function based on its name and body."""
    name = func_name.replace("test_", "").replace("_", " ")

    if "empty" in name or "none" in name:
        return "Verifies behavior with empty or None input."
    if "parsing" in name or "parse" in name:
        return f"Verifies parsing logic for {name}."
    if "filter" in name:
        return f"Verifies knowledge filtering by {name}."
    if "hash" in name:
        return "Verifies file hash computation correctness."
    if "freshness" in name or "staleness" in name:
        return "Verifies knowledge freshness detection."
    if "error" in name or "exception" in name:
        return f"Verifies error handling for {name}."
    if "boundary" in name or "edge" in name:
        return "Verifies boundary case handling."
    return f"Verifies {name} logic."


def has_module_docstring(content: str) -> bool:
    """Check if file (after shebang) starts with a docstring."""
    lines = content.split('\n')
    start_idx = 1 if lines and lines[0].startswith('#!') else 0
    for line in lines[start_idx:start_idx + 5]:
        stripped = line.strip()
        if stripped.startswith('"""') or stripped.startswith("'''"):
            return True
        if stripped == "" or stripped.startswith('#'):
            continue
        break
    return False


def insert_after_shebang(content: str, insertion: str) -> str:
    """Insert text after the shebang line (if any) and any following blank lines."""
    lines = content.split('\n')
    insert_idx = 0
    if lines and lines[0].startswith('#!'):
        insert_idx = 1
    while insert_idx < len(lines) and lines[insert_idx].strip() == '':
        insert_idx += 1
    new_lines = lines[:insert_idx] + [insertion] + lines[insert_idx:]
    return '\n'.join(new_lines)


def add_function_docstring(content: str, func_lineno: int, docstring: str) -> str:
    """Add a docstring to a function at the given line number."""
    lines = content.split('\n')
    idx = func_lineno - 1
    indent = re.match(r'^(\s*)', lines[idx]).group(1)
    doc_line = f'{indent}    """{docstring}"""'
    new_lines = lines[:idx + 1] + [doc_line] + lines[idx + 1:]
    return '\n'.join(new_lines)


def generate_module_docstring(test_path: Path) -> str:
    """Generate a module-level docstring for a test file."""
    module = derive_module_name(test_path)
    count = count_tests_in_file(test_path.read_text())
    if count > 0:
        return f"Tests for {module} — {count} tests."
    return f"Tests for {module}."


def process_test_file(test_path: Path, dry_run: bool = False) -> Tuple[bool, List[str]]:
    """Process a single test file, adding missing docstrings. Returns (changed, messages)."""
    content = test_path.read_text()
    original = content
    messages = []

    if not has_module_docstring(content):
        mod_doc = generate_module_docstring(test_path)
        content = insert_after_shebang(content, f'''"""{mod_doc}"""''')
        messages.append(f"Added module docstring: {mod_doc}")

    try:
        tree = ast.parse(content)
    except SyntaxError as e:
        messages.append(f"SKIP (syntax error): {e}")
        return False, messages

    funcs_to_doc: List[Tuple[int, str, str]] = []

    for node in ast.walk(tree):
        if isinstance(node, ast.FunctionDef) and node.name.startswith('test_'):
            has_docstring = (
                len(node.body) > 0 and
                isinstance(node.body[0], ast.Expr) and
                isinstance(node.body[0].value, ast.Constant) and
                isinstance(node.body[0].value.value, str)
            )
            if not has_docstring:
                func_body = ast.get_source_segment(content, node) or ""
                doc = infer_test_purpose(node.name, func_body)
                funcs_to_doc.append((node.lineno, node.name, doc))

    funcs_to_doc.sort(key=lambda x: -x[0])
    for lineno, func_name, doc in funcs_to_doc:
        content = add_function_docstring(content, lineno, doc)
        messages.append(f"Added docstring to {func_name}: {doc}")

    changed = content != original
    if changed and not dry_run:
        test_path.write_text(content)

    return changed, messages


def find_test_files(root: Path, max_files: int = 25) -> List[Path]:
    """Find test files under scripts/ and tests/ directories."""
    test_files = []
    for subdir in [root / "scripts", root / "tests"]:
        if subdir.exists():
            test_files.extend(subdir.glob("test_*.py"))
    test_files.sort()
    return test_files[:max_files]


def main():
    import argparse
    parser = argparse.ArgumentParser(description="Generate documentation for test files")
    parser.add_argument("--dry-run", action="store_true", help="Show changes without writing")
    parser.add_argument("--root", type=Path, default=Path.cwd(),
                        help="Repo root (default: current directory)")
    parser.add_argument("--limit", type=int, default=25,
                        help="Max files to process per run (handles 20+ requirement)")
    args = parser.parse_args()

    root = args.root
    test_files = find_test_files(root, args.limit)
    print(f"Found {len(test_files)} test files to process (limit={args.limit}):")

    total_changed = 0
    for tf in test_files:
        changed, msgs = process_test_file(tf, dry_run=args.dry_run)
        if changed:
            total_changed += 1
        status = "CHANGED" if changed else "OK"
        print(f"  [{status}] {tf.relative_to(root)}")
        for msg in msgs:
            print(f"    {msg}")

    print(f"\nCompleted: {total_changed} file(s) modified, {len(test_files) - total_changed} already up-to-date.")
    return 0


if __name__ == "__main__":
    sys.exit(main())
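As a small illustration of the name-based heuristic above, infer_test_purpose keys off words in the test name, and the docstrings it produces are exactly the ones visible in the surrounding test-file hunks (a sketch, assuming the module imports as test_documentation_generator):

    from test_documentation_generator import infer_test_purpose

    assert infer_test_purpose("test_empty_index", "") == "Verifies behavior with empty or None input."
    assert infer_test_purpose("test_ratio_filter", "") == "Verifies knowledge filtering by ratio filter."
    assert infer_test_purpose("test_rename", "") == "Verifies rename logic."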
@@ -14,6 +14,7 @@ parse_issue_body = mod.parse_issue_body

def test_basic_parsing():
    """Verifies parsing logic for basic parsing."""
    body = """## Context

This is the background info.
@@ -40,6 +41,7 @@ Some description.

def test_numbered_criteria():
    """Verifies numbered criteria logic."""
    body = """## Acceptance Criteria

1. First item
@@ -53,6 +55,7 @@ def test_numbered_criteria():

def test_epic_ref_from_body():
    """Verifies epic ref from body logic."""
    body = "Closes #123\n\nSome description."
    result = parse_issue_body(body)
    assert result["epic_ref"] == 123
@@ -60,6 +63,7 @@ def test_epic_ref_from_body():

def test_empty_body():
    """Verifies behavior with empty or None input."""
    result = parse_issue_body("")
    assert result["criteria"] == []
    assert result["context"] == ""
@@ -68,6 +72,7 @@ def test_empty_body():

def test_no_sections():
    """Verifies no sections logic."""
    body = "Just a plain issue body with no headings."
    result = parse_issue_body(body)
    assert result["context"] == "Just a plain issue body with no headings."
@@ -75,6 +80,7 @@ def test_no_sections():

def test_multiple_sections():
    """Verifies multiple sections logic."""
    body = """## Problem

Something is broken.

@@ -46,22 +46,27 @@ def check_test_sessions():
    return True, f"{len(files)} valid sessions"

def test_prompt_structure():
    """Verifies prompt structure logic."""
    passed, msg = check_prompt_structure()
    assert passed, msg

def test_confidence_scoring():
    """Verifies confidence scoring logic."""
    passed, msg = check_confidence_scoring()
    assert passed, msg

def test_example_quality():
    """Verifies example quality logic."""
    passed, msg = check_example_quality()
    assert passed, msg

def test_constraint_coverage():
    """Verifies constraint coverage logic."""
    passed, msg = check_constraint_coverage()
    assert passed, msg

def test_test_sessions():
    """Verifies sessions logic."""
    passed, msg = check_test_sessions()
    assert passed, msg


@@ -47,12 +47,14 @@ def _make_tool_calls(repeats):
# ── Tests ─────────────────────────────────────────────────────

def test_empty_sessions():
    """Verifies behavior with empty or None input."""
    patterns = analyze_sessions([])
    assert patterns == []
    print("PASS: test_empty_sessions")


def test_no_patterns_on_clean_sessions():
    """Verifies no patterns on clean sessions logic."""
    sessions = [
        _make_session("s1", tool_calls=[{"tool": "read_file", "latency_ms": 50}]),
        _make_session("s2", tool_calls=[{"tool": "write_file", "latency_ms": 80}]),

@@ -17,6 +17,7 @@ compute_file_hash = mod.compute_file_hash

def test_fresh_entry():
    """Verifies fresh entry logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
@@ -31,6 +32,7 @@ def test_fresh_entry():

def test_stale_entry():
    """Verifies stale entry logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
@@ -47,6 +49,7 @@ def test_stale_entry():

def test_missing_source():
    """Verifies missing source logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
@@ -57,6 +60,7 @@ def test_missing_source():

def test_no_hash():
    """Verifies file hash computation correctness."""
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
@@ -71,6 +75,7 @@ def test_no_hash():

def test_no_source_field():
    """Verifies no source field logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
@@ -81,6 +86,7 @@ def test_no_source_field():

def test_fix_hashes():
    """Verifies file hash computation correctness."""
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
@@ -98,6 +104,7 @@ def test_fix_hashes():

def test_empty_index():
    """Verifies behavior with empty or None input."""
    with tempfile.TemporaryDirectory() as tmpdir:
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
@@ -108,6 +115,7 @@ def test_empty_index():

def test_compute_hash_nonexistent():
    """Verifies behavior with empty or None input."""
    h = compute_file_hash("/nonexistent/path/file.py")
    assert h is None
    print("PASS: test_compute_hash_nonexistent")

@@ -1,170 +0,0 @@
#!/usr/bin/env python3
"""
Tests for PR Complexity Scorer — unit tests for the scoring logic.
"""

import sys
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent))

from pr_complexity_scorer import (
    score_pr,
    is_dependency_file,
    is_test_file,
    TIME_PER_POINT,
    SMALL_FILES,
    MEDIUM_FILES,
    LARGE_FILES,
    SMALL_LINES,
    MEDIUM_LINES,
    LARGE_LINES,
)

PASS = 0
FAIL = 0

def test(name):
    def decorator(fn):
        global PASS, FAIL
        try:
            fn()
            PASS += 1
            print(f"  [PASS] {name}")
        except AssertionError as e:
            FAIL += 1
            print(f"  [FAIL] {name}: {e}")
        except Exception as e:
            FAIL += 1
            print(f"  [FAIL] {name}: Unexpected error: {e}")
    return decorator

def assert_eq(a, b, msg=""):
    if a != b:
        raise AssertionError(f"{msg} expected {b!r}, got {a!r}")

def assert_true(v, msg=""):
    if not v:
        raise AssertionError(msg or "Expected True")

def assert_false(v, msg=""):
    if v:
        raise AssertionError(msg or "Expected False")


print("=== PR Complexity Scorer Tests ===\n")

print("-- File Classification --")

@test("dependency file detection — requirements.txt")
def _():
    assert_true(is_dependency_file("requirements.txt"))
    assert_true(is_dependency_file("src/requirements.txt"))
    assert_false(is_dependency_file("requirements_test.txt"))

@test("dependency file detection — pyproject.toml")
def _():
    assert_true(is_dependency_file("pyproject.toml"))
    assert_false(is_dependency_file("myproject.py"))

@test("test file detection — pytest style")
def _():
    assert_true(is_test_file("tests/test_api.py"))
    assert_true(is_test_file("test_module.py"))
    assert_true(is_test_file("src/module_test.py"))

@test("test file detection — other frameworks")
def _():
    assert_true(is_test_file("spec/feature_spec.rb"))
    assert_true(is_test_file("__tests__/component.test.js"))
    assert_false(is_test_file("testfixtures/helper.py"))


print("\n-- Scoring Logic --")

@test("small PR gets low score (1-3)")
def _():
    score, minutes, _ = score_pr(
        files_changed=3,
        additions=50,
        deletions=10,
        has_dependency_changes=False,
        test_coverage_delta=None
    )
    assert_true(1 <= score <= 3, f"Score should be low, got {score}")
    assert_true(minutes < 20)

@test("medium PR gets medium score (4-6)")
def _():
    score, minutes, _ = score_pr(
        files_changed=15,
        additions=400,
        deletions=100,
        has_dependency_changes=False,
        test_coverage_delta=None
    )
    assert_true(4 <= score <= 6, f"Score should be medium, got {score}")
    assert_true(20 <= minutes <= 45)

@test("large PR gets high score (7-9)")
def _():
    score, minutes, _ = score_pr(
        files_changed=60,
        additions=3000,
        deletions=1500,
        has_dependency_changes=True,
        test_coverage_delta=None
    )
    assert_true(7 <= score <= 9, f"Score should be high, got {score}")
    assert_true(minutes >= 45)

@test("dependency changes boost score")
def _():
    base_score, _, _ = score_pr(
        files_changed=10, additions=200, deletions=50,
        has_dependency_changes=False, test_coverage_delta=None
    )
    dep_score, _, _ = score_pr(
        files_changed=10, additions=200, deletions=50,
        has_dependency_changes=True, test_coverage_delta=None
    )
    assert_true(dep_score > base_score, f"Deps: {base_score} -> {dep_score}")

@test("adding tests lowers complexity")
def _():
    base_score, _, _ = score_pr(
        files_changed=8, additions=150, deletions=20,
        has_dependency_changes=False, test_coverage_delta=None
    )
    better_score, _, _ = score_pr(
        files_changed=8, additions=180, deletions=20,
        has_dependency_changes=False, test_coverage_delta=3
    )
    assert_true(better_score < base_score, f"Tests: {base_score} -> {better_score}")

@test("removing tests increases complexity")
def _():
    base_score, _, _ = score_pr(
        files_changed=8, additions=150, deletions=20,
        has_dependency_changes=False, test_coverage_delta=None
    )
    worse_score, _, _ = score_pr(
        files_changed=8, additions=150, deletions=20,
        has_dependency_changes=False, test_coverage_delta=-2
    )
    assert_true(worse_score > base_score, f"Remove tests: {base_score} -> {worse_score}")

@test("score bounded 1-10")
def _():
    for files, adds, dels in [(1, 10, 5), (100, 10000, 5000)]:
        score, _, _ = score_pr(files, adds, dels, False, None)
        assert_true(1 <= score <= 10, f"Score {score} out of range")

@test("estimated minutes exist for all scores")
def _():
    for s in range(1, 11):
        assert_true(s in TIME_PER_POINT, f"Missing time for score {s}")


print(f"\n=== Results: {PASS} passed, {FAIL} failed ===")
sys.exit(0 if FAIL == 0 else 1)
@@ -11,6 +11,7 @@ from session_pair_harvester import extract_pairs_from_session, deduplicate_pairs


def test_basic_extraction():
    """Verifies basic extraction logic."""
    session = {
        "id": "test_001",
        "model": "test-model",
@@ -29,6 +30,7 @@ def test_basic_extraction():


def test_filters_short_responses():
    """Verifies knowledge filtering by filters short responses."""
    session = {
        "id": "test_002",
        "model": "test",
@@ -43,6 +45,7 @@ def test_filters_short_responses():


def test_skips_tool_results():
    """Verifies skips tool results logic."""
    session = {
        "id": "test_003",
        "model": "test",
@@ -57,6 +60,7 @@ def test_skips_tool_results():


def test_deduplication():
    """Verifies deduplication logic."""
    pairs = [
        {"terse": "What is X?", "rich": "X is Y.", "source": "s1", "model": "m"},
        {"terse": "What is X?", "rich": "X is Y.", "source": "s2", "model": "m"},
@@ -68,6 +72,7 @@ def test_deduplication():


def test_ratio_filter():
    """Verifies knowledge filtering by ratio filter."""
    session = {
        "id": "test_005",
        "model": "test",
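The deduplication fixture above uses two pairs that differ only in their source, which suggests deduplicate_pairs keys on the pair text itself. A hedged sketch under that assumption (the real normalization may differ):

def deduplicate_pairs(pairs):
    # Assumes each pair is a dict with "terse" and "rich" keys, as in the fixtures above.
    seen = set()
    unique = []
    for pair in pairs:
        key = (pair["terse"].strip().lower(), pair["rich"].strip().lower())
        if key not in seen:
            seen.add(key)
            unique.append(pair)  # keep the first occurrence, drop later duplicates
    return unique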
@@ -1,13 +1,16 @@
"""Tests for CI configuration — 2 tests."""
from pathlib import Path


def test_requirements_makefile_and_workflow_exist() -> None:
    """Verifies requirements makefile and workflow exist logic."""
    assert Path("requirements.txt").exists()
    assert Path("Makefile").exists()
    assert Path(".gitea/workflows/test.yml").exists()


def test_ci_workflow_runs_project_test_command() -> None:
    """Verifies ci workflow runs project command logic."""
    workflow = Path(".gitea/workflows/test.yml").read_text(encoding="utf-8")
    requirements = Path("requirements.txt").read_text(encoding="utf-8")
    makefile = Path("Makefile").read_text(encoding="utf-8")
@@ -22,28 +22,34 @@ from dedup import (

class TestNormalize:
    def test_lowercases(self):
        """Verifies lowercases logic."""
        assert normalize_text("Hello World") == "hello world"

    def test_collapses_whitespace(self):
        """Verifies collapses whitespace logic."""
        assert normalize_text(" hello world ") == "hello world"

    def test_strips(self):
        """Verifies strips logic."""
        assert normalize_text(" text ") == "text"


class TestContentHash:
    def test_deterministic(self):
        """Verifies deterministic logic."""
        h1 = content_hash("Hello World")
        h2 = content_hash("hello world")
        h3 = content_hash(" Hello World ")
        assert h1 == h2 == h3

    def test_different_texts(self):
        """Verifies different texts logic."""
        h1 = content_hash("Hello")
        h2 = content_hash("World")
        assert h1 != h2

    def test_returns_hex(self):
        """Verifies returns hex logic."""
        h = content_hash("test")
        assert len(h) == 64  # SHA256
        assert all(c in '0123456789abcdef' for c in h)
@@ -51,18 +57,21 @@ class TestContentHash:

class TestTokenize:
    def test_extracts_words(self):
        """Verifies extracts words logic."""
        tokens = tokenize("Hello World Test")
        assert "hello" in tokens
        assert "world" in tokens
        assert "test" in tokens

    def test_skips_short_words(self):
        """Verifies skips short words logic."""
        tokens = tokenize("a to is the hello")
        assert "a" not in tokens
        assert "to" not in tokens
        assert "hello" in tokens

    def test_returns_set(self):
        """Verifies returns set logic."""
        tokens = tokenize("hello hello world")
        assert isinstance(tokens, set)
        assert len(tokens) == 2
@@ -70,20 +79,25 @@ class TestTokenize:

class TestTokenSimilarity:
    def test_identical(self):
        """Verifies identical logic."""
        assert token_similarity("hello world", "hello world") == 1.0

    def test_no_overlap(self):
        """Verifies no overlap logic."""
        assert token_similarity("alpha beta", "gamma delta") == 0.0

    def test_partial_overlap(self):
        """Verifies partial overlap logic."""
        sim = token_similarity("hello world test", "hello universe test")
        assert 0.3 < sim < 0.7

    def test_empty(self):
        """Verifies behavior with empty or None input."""
        assert token_similarity("", "hello") == 0.0
        assert token_similarity("hello", "") == 0.0

    def test_symmetric(self):
        """Verifies symmetric logic."""
        a = "hello world test"
        b = "hello universe test"
        assert token_similarity(a, b) == token_similarity(b, a)
@@ -91,22 +105,26 @@ class TestTokenSimilarity:

class TestQualityScore:
    def test_high_confidence(self):
        """Verifies high confidence logic."""
        fact = {"confidence": 0.95, "source_count": 5, "tags": ["test"], "related": ["x"]}
        score = quality_score(fact)
        assert score > 0.7

    def test_low_confidence(self):
        """Verifies low confidence logic."""
        fact = {"confidence": 0.3, "source_count": 1}
        score = quality_score(fact)
        assert score < 0.5

    def test_defaults(self):
        """Verifies defaults logic."""
        score = quality_score({})
        assert 0 < score < 1


class TestMergeFacts:
    def test_merges_tags(self):
        """Verifies merges tags logic."""
        keep = {"id": "a", "fact": "test", "tags": ["git"], "confidence": 0.9}
        drop = {"id": "b", "fact": "test", "tags": ["python"], "confidence": 0.8}
        merged = merge_facts(keep, drop)
@@ -114,18 +132,21 @@ class TestMergeFacts:
        assert "python" in merged["tags"]

    def test_merges_source_count(self):
        """Verifies merges source count logic."""
        keep = {"id": "a", "fact": "test", "source_count": 3}
        drop = {"id": "b", "fact": "test", "source_count": 2}
        merged = merge_facts(keep, drop)
        assert merged["source_count"] == 5

    def test_keeps_higher_confidence(self):
        """Verifies keeps higher confidence logic."""
        keep = {"id": "a", "fact": "test", "confidence": 0.7}
        drop = {"id": "b", "fact": "test", "confidence": 0.9}
        merged = merge_facts(keep, drop)
        assert merged["confidence"] == 0.9

    def test_tracks_merged_from(self):
        """Verifies tracks merged from logic."""
        keep = {"id": "a", "fact": "test"}
        drop = {"id": "b", "fact": "test"}
        merged = merge_facts(keep, drop)
@@ -134,6 +155,7 @@ class TestMergeFacts:

class TestDedupFacts:
    def test_removes_exact_dupes(self):
        """Verifies removes exact dupes logic."""
        facts = [
            {"id": "1", "fact": "Always use git rebase"},
            {"id": "2", "fact": "Always use git rebase"},  # exact dupe
@@ -144,6 +166,7 @@ class TestDedupFacts:
        assert stats["unique"] == 2

    def test_removes_near_dupes(self):
        """Verifies removes near dupes logic."""
        facts = [
            {"id": "1", "fact": "Always check logs before deploying to production server"},
            {"id": "2", "fact": "Always check logs before deploying to production environment"},
@@ -154,6 +177,7 @@ class TestDedupFacts:
        assert stats["unique"] == 2

    def test_preserves_unique(self):
        """Verifies preserves unique logic."""
        facts = [
            {"id": "1", "fact": "Use git rebase for clean history"},
            {"id": "2", "fact": "Docker containers should be stateless"},
@@ -164,11 +188,13 @@ class TestDedupFacts:
        assert stats["removed"] == 0

    def test_empty_input(self):
        """Verifies behavior with empty or None input."""
        deduped, stats = dedup_facts([])
        assert stats["total"] == 0
        assert stats["unique"] == 0

    def test_keeps_higher_quality_near_dup(self):
        """Verifies keeps higher quality near dup logic."""
        facts = [
            {"id": "1", "fact": "Check logs before deploying to production server", "confidence": 0.5, "source_count": 1},
            {"id": "2", "fact": "Check logs before deploying to production environment", "confidence": 0.9, "source_count": 5, "tags": ["ops"]},
@@ -179,6 +205,7 @@ class TestDedupFacts:
        assert deduped[0]["confidence"] == 0.9

    def test_dry_run_does_not_modify(self):
        """Verifies dry run does not modify logic."""
        facts = [
            {"id": "1", "fact": "Same text"},
            {"id": "2", "fact": "Same text"},
@@ -191,16 +218,19 @@ class TestDedupFacts:

class TestGenerateTestDuplicates:
    def test_generates_correct_count(self):
        """Verifies generates correct count logic."""
        facts = generate_test_duplicates(20)
        assert len(facts) > 20  # 20 unique + duplicates

    def test_has_exact_dupes(self):
        """Verifies has exact dupes logic."""
        facts = generate_test_duplicates(20)
        hashes = [content_hash(f["fact"]) for f in facts]
        # Should have some duplicate hashes
        assert len(hashes) != len(set(hashes))

    def test_dedup_removes_dupes(self):
        """Verifies dedup removes dupes logic."""
        facts = generate_test_duplicates(20)
        deduped, stats = dedup_facts(facts)
        assert stats["unique"] <= 20
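Taken together, these assertions pin the dedup primitives down fairly tightly: normalization lowercases, collapses whitespace, and strips; content_hash is SHA-256 of the normalized text; tokenize returns a set of lowercase words; and token_similarity behaves like a Jaccard overlap. A sketch consistent with those constraints (the minimum token length is an assumption):

import hashlib
import re

def normalize_text(text):
    return re.sub(r"\s+", " ", text).strip().lower()  # lowercase, collapse whitespace runs, strip ends

def content_hash(text):
    return hashlib.sha256(normalize_text(text).encode("utf-8")).hexdigest()  # 64 hex chars (SHA-256)

def tokenize(text):
    return {w for w in re.findall(r"[a-z0-9]+", text.lower()) if len(w) >= 3}  # assumed minimum length

def token_similarity(a, b):
    ta, tb = tokenize(a), tokenize(b)
    if not ta or not tb:
        return 0.0
    return len(ta & tb) / len(ta | tb)  # Jaccard similarity; symmetric by construction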
@@ -20,6 +20,7 @@ def _make_repo(tmpdir, structure):


def test_undocumented_symbol():
    """Verifies undocumented symbol logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/calculator.py": "def add(a, b):\n    return a + b\n",
@@ -31,6 +32,7 @@ def test_undocumented_symbol():


def test_documented_symbol_no_gap():
    """Verifies documented symbol no gap logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/calculator.py": "def add(a, b):\n    return a + b\n",
@@ -43,6 +45,7 @@ def test_documented_symbol_no_gap():


def test_untested_module():
    """Verifies untested module logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/calculator.py": "def add(a, b):\n    return a + b\n",
@@ -55,6 +58,7 @@ def test_untested_module():


def test_tested_module_no_gap():
    """Verifies tested module no gap logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/calculator.py": "def add(a, b):\n    return a + b\n",
@@ -67,6 +71,7 @@ def test_tested_module_no_gap():


def test_missing_implementation():
    """Verifies missing implementation logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/app.py": "def run():\n    pass\n",
@@ -78,6 +83,7 @@ def test_missing_implementation():


def test_private_symbols_skipped():
    """Verifies private symbols skipped logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/app.py": "def _internal():\n    pass\ndef public():\n    pass\n",
@@ -90,18 +96,21 @@ def test_private_symbols_skipped():


def test_empty_repo():
    """Verifies behavior with empty or None input."""
    with tempfile.TemporaryDirectory() as tmpdir:
        report = KnowledgeGapIdentifier().analyze(tmpdir)
        assert len(report.gaps) == 0


def test_invalid_path():
    """Verifies invalid path logic."""
    report = KnowledgeGapIdentifier().analyze("/nonexistent/path/xyz")
    assert len(report.gaps) == 1
    assert report.gaps[0].severity == GapSeverity.ERROR


def test_report_summary():
    """Verifies report summary logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/app.py": "class MyService:\n    def handle(self):\n        pass\n",
@@ -114,6 +123,7 @@ def test_report_summary():


def test_report_to_dict():
    """Verifies report to dict logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/app.py": "def hello():\n    pass\n",
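The _make_repo helper named in the hunk header is only partially visible here. Assuming structure maps relative paths to file contents, a plausible minimal version looks like:

from pathlib import Path

def _make_repo(tmpdir, structure):
    # Write each relative-path -> content entry into the temporary repo root.
    for rel_path, content in structure.items():
        path = Path(tmpdir) / rel_path
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(content, encoding="utf-8")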
@@ -32,6 +32,7 @@ class TestBottleneck:
    """Test Bottleneck dataclass."""

    def test_creation(self):
        """Verifies creation logic."""
        b = Bottleneck(
            category="test",
            name="test_foo",
@@ -48,6 +49,7 @@ class TestBottleneck:
        assert b.line_number is None

    def test_with_location(self):
        """Verifies with location logic."""
        b = Bottleneck(
            category="test",
            name="test_bar",
@@ -61,6 +63,7 @@ class TestBottleneck:
        assert b.line_number == 42

    def test_to_dict(self):
        """Verifies to dict logic."""
        b = Bottleneck("test", "x", 1.0, "info", "y")
        d = b.__dict__
        assert "category" in d
@@ -71,6 +74,7 @@ class TestPerfReport:
    """Test PerfReport dataclass."""

    def test_creation(self):
        """Verifies creation logic."""
        report = PerfReport(
            timestamp="2026-01-01T00:00:00Z",
            repo_path="/tmp/repo"
@@ -80,6 +84,7 @@ class TestPerfReport:
        assert report.summary == {}

    def test_to_dict(self):
        """Verifies to dict logic."""
        report = PerfReport(
            timestamp="2026-01-01T00:00:00Z",
            repo_path="/tmp/repo",
@@ -94,6 +99,7 @@ class TestSeveritySort:
    """Test severity sorting."""

    def test_critical_first(self):
        """Verifies critical first logic."""
        items = [
            Bottleneck("test", "a", 1.0, "info", ""),
            Bottleneck("test", "b", 0.5, "critical", ""),
@@ -105,6 +111,7 @@ class TestSeveritySort:
        assert items[2].severity == "info"

    def test_duration_within_severity(self):
        """Verifies duration within severity logic."""
        items = [
            Bottleneck("test", "slow", 10.0, "warning", ""),
            Bottleneck("test", "fast", 1.0, "warning", ""),
@@ -117,6 +124,7 @@ class TestSlowTestScan:
    """Test slow test pattern scanning."""

    def test_finds_sleep(self, tmp_path):
        """Verifies finds sleep logic."""
        test_file = tmp_path / "test_sleepy.py"
        test_file.write_text(textwrap.dedent('''
            import time
@@ -131,6 +139,7 @@ class TestSlowTestScan:
        assert any("sleep" in b.recommendation.lower() for b in bottlenecks)

    def test_finds_http_calls(self, tmp_path):
        """Verifies finds http calls logic."""
        test_file = tmp_path / "test_http.py"
        test_file.write_text(textwrap.dedent('''
            import requests
@@ -145,6 +154,7 @@ class TestSlowTestScan:
        assert any("HTTP" in b.recommendation or "mock" in b.recommendation.lower() for b in bottlenecks)

    def test_skips_non_test_files(self, tmp_path):
        """Verifies skips non files logic."""
        src_file = tmp_path / "main.py"
        src_file.write_text("import time\ntime.sleep(10)\n")

@@ -152,10 +162,12 @@ class TestSlowTestScan:
        assert len(bottlenecks) == 0

    def test_handles_missing_dir(self):
        """Verifies handles missing dir logic."""
        bottlenecks = find_slow_tests_by_scan("/nonexistent/path")
        assert bottlenecks == []

    def test_file_path_populated(self, tmp_path):
        """Verifies file path populated logic."""
        test_file = tmp_path / "test_example.py"
        test_file.write_text("import time\n\ndef test_it():\n    time.sleep(2)\n")

@@ -169,6 +181,7 @@ class TestBuildArtifacts:
    """Test build artifact analysis."""

    def test_finds_large_node_modules(self, tmp_path):
        """Verifies finds large node modules logic."""
        nm = tmp_path / "node_modules"
        nm.mkdir()
        # Create a file > 10MB
@@ -180,6 +193,7 @@ class TestBuildArtifacts:
        assert any("node_modules" in b.name for b in bottlenecks)

    def test_ignores_small_dirs(self, tmp_path):
        """Verifies ignores small dirs logic."""
        nm = tmp_path / "node_modules"
        nm.mkdir()
        small_file = nm / "small.txt"
@@ -189,6 +203,7 @@ class TestBuildArtifacts:
        assert not any("node_modules" in b.name for b in bottlenecks)

    def test_finds_pycache(self, tmp_path):
        """Verifies finds pycache logic."""
        cache = tmp_path / "__pycache__"
        cache.mkdir()
        big_file = cache / "big.pyc"
@@ -202,6 +217,7 @@ class TestMakefileAnalysis:
    """Test Makefile analysis."""

    def test_finds_pip_install(self, tmp_path):
        """Verifies finds pip install logic."""
        makefile = tmp_path / "Makefile"
        makefile.write_text(textwrap.dedent('''
            install:
@@ -215,6 +231,7 @@ class TestMakefileAnalysis:
        assert len(bottlenecks) >= 1

    def test_no_makefile(self, tmp_path):
        """Verifies no makefile logic."""
        bottlenecks = analyze_makefile_targets(str(tmp_path))
        assert bottlenecks == []

@@ -223,6 +240,7 @@ class TestImportAnalysis:
    """Test heavy import detection."""

    def test_finds_pandas(self, tmp_path):
        """Verifies finds pandas logic."""
        src = tmp_path / "analysis.py"
        src.write_text("import pandas as pd\n")

@@ -231,6 +249,7 @@ class TestImportAnalysis:
        assert any("pandas" in b.name for b in bottlenecks)

    def test_finds_torch(self, tmp_path):
        """Verifies finds torch logic."""
        src = tmp_path / "model.py"
        src.write_text("import torch\n")

@@ -238,6 +257,7 @@ class TestImportAnalysis:
        assert any("torch" in b.name for b in bottlenecks)

    def test_skips_light_imports(self, tmp_path):
        """Verifies skips light imports logic."""
        src = tmp_path / "utils.py"
        src.write_text("import json\nimport os\nimport sys\n")

@@ -249,12 +269,14 @@ class TestGenerateReport:
    """Test full report generation."""

    def test_empty_repo(self, tmp_path):
        """Verifies behavior with empty or None input."""
        report = generate_report(str(tmp_path))
        assert report.summary["total_bottlenecks"] >= 0
        assert "critical" in report.summary
        assert "warning" in report.summary

    def test_with_findings(self, tmp_path):
        """Verifies with findings logic."""
        # Create a test file with issues
        test_file = tmp_path / "test_slow.py"
        test_file.write_text(textwrap.dedent('''
@@ -273,6 +295,7 @@ class TestGenerateReport:
        assert len(report.bottlenecks) > 0

    def test_summary_categories(self, tmp_path):
        """Verifies summary categories logic."""
        report = generate_report(str(tmp_path))
        assert "by_category" in report.summary

@@ -281,6 +304,7 @@ class TestMarkdownReport:
    """Test markdown output."""

    def test_format(self):
        """Verifies format logic."""
        report = PerfReport(
            timestamp="2026-01-01T00:00:00Z",
            repo_path="/tmp/repo",
@@ -303,6 +327,7 @@ class TestMarkdownReport:
        assert "Fix it" in md

    def test_empty_report(self):
        """Verifies behavior with empty or None input."""
        report = PerfReport(
            timestamp="2026-01-01T00:00:00Z",
            repo_path="/tmp/repo",
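From these assertions, find_slow_tests_by_scan walks test files, flags time.sleep calls and real HTTP requests, returns an empty list for a missing directory, ignores non-test sources, and populates file_path on each finding. The sketch below is a guess at that shape; the Bottleneck field order follows the positional call in test_to_dict, and the pattern list and severities are assumptions.

import re
from pathlib import Path

def find_slow_tests_by_scan(repo_path):
    # Hedged sketch; Bottleneck is the dataclass exercised above
    # (category, name, duration, severity, recommendation, plus optional file_path/line_number).
    patterns = [
        (re.compile(r"time\.sleep\("), "Remove the sleep or use a fake clock"),
        (re.compile(r"requests\.(get|post|put|delete)\("), "Mock HTTP calls instead of hitting the network"),
    ]
    root = Path(repo_path)
    if not root.is_dir():
        return []  # missing directory -> nothing to report
    bottlenecks = []
    for test_file in root.rglob("test_*.py"):  # only test files; other sources are ignored
        lines = test_file.read_text(encoding="utf-8", errors="ignore").splitlines()
        for line_no, line in enumerate(lines, start=1):
            for pattern, recommendation in patterns:
                if pattern.search(line):
                    bottlenecks.append(Bottleneck(
                        "slow_test", test_file.name, 0.0, "warning", recommendation,
                        file_path=str(test_file), line_number=line_no,
                    ))
    return bottlenecks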
@@ -21,27 +21,32 @@ from quality_gate import (

class TestScoreSpecificity(unittest.TestCase):
    def test_specific_content_scores_high(self):
        """Verifies specific content scores high logic."""
        content = "Run `python3 deploy.py --env prod` on 2026-04-15. Example: step 1 configure nginx."
        score = score_specificity(content)
        self.assertGreater(score, 0.6)

    def test_vague_content_scores_low(self):
        """Verifies vague content scores low logic."""
        content = "It generally depends. Various factors might affect this. Basically, it varies."
        score = score_specificity(content)
        self.assertLess(score, 0.5)

    def test_empty_scores_baseline(self):
        """Verifies behavior with empty or None input."""
        score = score_specificity("")
        self.assertAlmostEqual(score, 0.5, delta=0.1)


class TestScoreActionability(unittest.TestCase):
    def test_actionable_content_scores_high(self):
        """Verifies actionable content scores high logic."""
        content = "1. Run `pip install -r requirements.txt`\n2. Execute `python3 train.py`\n3. Verify with `pytest`"
        score = score_actionability(content)
        self.assertGreater(score, 0.6)

    def test_abstract_content_scores_low(self):
        """Verifies abstract content scores low logic."""
        content = "The concept of intelligence is fascinating and multifaceted."
        score = score_actionability(content)
        self.assertLess(score, 0.5)
@@ -49,33 +54,40 @@ class TestScoreActionability(unittest.TestCase):

class TestScoreFreshness(unittest.TestCase):
    def test_recent_timestamp_scores_high(self):
        """Verifies recent timestamp scores high logic."""
        recent = datetime.now(timezone.utc).isoformat()
        score = score_freshness(recent)
        self.assertGreater(score, 0.9)

    def test_old_timestamp_scores_low(self):
        """Verifies old timestamp scores low logic."""
        old = (datetime.now(timezone.utc) - timedelta(days=365)).isoformat()
        score = score_freshness(old)
        self.assertLess(score, 0.2)

    def test_none_returns_baseline(self):
        """Verifies behavior with empty or None input."""
        score = score_freshness(None)
        self.assertEqual(score, 0.5)


class TestScoreSourceQuality(unittest.TestCase):
    def test_claude_scores_high(self):
        """Verifies claude scores high logic."""
        self.assertGreater(score_source_quality("claude-sonnet"), 0.85)

    def test_ollama_scores_lower(self):
        """Verifies ollama scores lower logic."""
        self.assertLess(score_source_quality("ollama"), 0.7)

    def test_unknown_returns_default(self):
        """Verifies unknown returns default logic."""
        self.assertEqual(score_source_quality("unknown"), 0.5)


class TestScoreEntry(unittest.TestCase):
    def test_good_entry_scores_high(self):
        """Verifies good entry scores high logic."""
        entry = {
            "content": "To deploy: run `kubectl apply -f deployment.yaml`. Verify with `kubectl get pods`.",
            "model": "claude-sonnet",
@@ -85,6 +97,7 @@ class TestScoreEntry(unittest.TestCase):
        self.assertGreater(score, 0.6)

    def test_poor_entry_scores_low(self):
        """Verifies poor entry scores low logic."""
        entry = {
            "content": "It depends. Various things might happen.",
            "model": "unknown",
@@ -95,6 +108,7 @@ class TestScoreEntry(unittest.TestCase):

class TestFilterEntries(unittest.TestCase):
    def test_filters_low_quality(self):
        """Verifies knowledge filtering by filters low quality."""
        entries = [
            {"content": "Run `deploy.py` to fix the issue.", "model": "claude"},
            {"content": "It might work sometimes.", "model": "unknown"},
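The quality-gate tests above imply that score_entry blends the four sub-scores (specificity, actionability, freshness, source quality) into a single value in [0, 1]. A hedged sketch with equal weights as a placeholder; the real weighting and the timestamp key name are assumptions:

def score_entry(entry):
    # Hypothetical composition; the production weights and key names may differ.
    content = entry.get("content", "")
    parts = [
        score_specificity(content),
        score_actionability(content),
        score_freshness(entry.get("timestamp")),
        score_source_quality(entry.get("model", "unknown")),
    ]
    return sum(parts) / len(parts)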