Compare commits

fix/676 ... step35/126 (21 commits)

| SHA1 |
|---|
| e2b1a9f8ac |
| 345d2451d0 |
| 8aa9c9f018 |
| 277f9e3a2b |
| 21f654a159 |
| 12abaad838 |
| c106db2e28 |
| 242c77cc99 |
| fe94130380 |
| 4181065f60 |
| cc215e3ed7 |
| baa2c84c3f |
| 6dd354385f |
| 55adcb31dc |
| ec0e9d65ca |
| b732172dcc |
| f7c479c4eb |
| e1e42c3f8e |
| 7a4677c752 |
| 229c327c9e |
| 537bb1b61b |
.gitignore (vendored, Normal file, +2)
@@ -0,0 +1,2 @@
__pycache__/
*.pyc
quality_gate.py (Normal file, +297)
@@ -0,0 +1,297 @@
#!/usr/bin/env python3
"""
quality_gate.py — Score and filter knowledge entries.

Scores each entry on 4 dimensions:
- Specificity: concrete examples vs vague generalities
- Actionability: can this be used to do something?
- Freshness: is this still accurate?
- Source quality: was the model/provider reliable?

Usage:
    from quality_gate import score_entry, filter_entries, quality_report

    score = score_entry(entry)
    filtered = filter_entries(entries, threshold=0.5)
    report = quality_report(entries)
"""

import json
import math
import re
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Any, Optional

# Source quality scores (higher = more reliable)
SOURCE_QUALITY = {
    "claude-sonnet": 0.9,
    "claude-opus": 0.95,
    "gpt-4": 0.85,
    "gpt-4-turbo": 0.85,
    "gpt-5": 0.9,
    "mimo-v2-pro": 0.8,
    "gemini-pro": 0.8,
    "llama-3-70b": 0.75,
    "llama-3-8b": 0.7,
    "ollama": 0.6,
    "unknown": 0.5,
}

DEFAULT_SOURCE_QUALITY = 0.5

# Specificity indicators
SPECIFIC_INDICATORS = [
    r"\b\d+\.\d+",                  # decimal numbers
    r"\b\d{4}-\d{2}-\d{2}",         # dates
    r"\b[A-Z][a-z]+\s[A-Z][a-z]+",  # proper nouns
    r"`[^`]+`",                     # code/commands
    r"https?://",                   # URLs
    r"\b(example|instance|specifically|concretely)\b",
    r"\b(step \d|first|second|third)\b",
    r"\b(exactly|precisely|measured|counted)\b",
]

# Vagueness indicators (penalty)
VAGUE_INDICATORS = [
    r"\b(generally|usually|often|sometimes|might|could|perhaps)\b",
    r"\b(various|several|many|some|few)\b",
    r"\b(it depends|varies|differs)\b",
    r"\b(basically|essentially|fundamentally)\b",
    r"\b(everyone knows|it's obvious|clearly)\b",
]

# Actionability indicators
ACTIONABLE_INDICATORS = [
    r"\b(run|execute|install|deploy|configure|set up)\b",
    r"\b(use|apply|implement|create|build)\b",
    r"\b(check|verify|test|validate|confirm)\b",
    r"\b(fix|resolve|solve|debug|troubleshoot)\b",
    r"\b(if .+ then|when .+ do|to .+ use)\b",
    r"```[a-z]*\n",  # code blocks
    r"\$\s",         # shell commands
    r"\b\d+\.\s",    # numbered steps
]


def score_specificity(content: str) -> float:
    """Score specificity: 0=vague, 1=very specific."""
    content_lower = content.lower()
    score = 0.5  # baseline

    # Check for specific indicators
    specific_count = sum(
        len(re.findall(p, content, re.IGNORECASE))
        for p in SPECIFIC_INDICATORS
    )

    # Check for vague indicators
    vague_count = sum(
        len(re.findall(p, content_lower))
        for p in VAGUE_INDICATORS
    )

    # Adjust score
    score += min(specific_count * 0.05, 0.4)
    score -= min(vague_count * 0.08, 0.3)

    # Length bonus (longer = more detail, up to a point)
    word_count = len(content.split())
    if word_count > 50:
        score += min((word_count - 50) * 0.001, 0.1)

    return max(0.0, min(1.0, score))


def score_actionability(content: str) -> float:
    """Score actionability: 0=abstract, 1=highly actionable."""
    content_lower = content.lower()
    score = 0.3  # baseline (most knowledge is informational)

    # Check for actionable indicators
    actionable_count = sum(
        len(re.findall(p, content_lower))
        for p in ACTIONABLE_INDICATORS
    )

    score += min(actionable_count * 0.1, 0.6)

    # Code blocks are highly actionable
    if "```" in content:
        score += 0.2

    # Numbered steps are actionable
    if re.search(r"\d+\.\s+\w", content):
        score += 0.1

    return max(0.0, min(1.0, score))


def score_freshness(timestamp: Optional[str]) -> float:
    """Score freshness: 1=new, decays over time."""
    if not timestamp:
        return 0.5

    try:
        if isinstance(timestamp, str):
            ts = datetime.fromisoformat(timestamp.replace("Z", "+00:00"))
        else:
            ts = timestamp

        now = datetime.now(timezone.utc)
        age_days = (now - ts).days

        # Exponential decay: 1.0 at day 0, ~0.37 at 180 days, ~0.13 at 365 days
        score = math.exp(-age_days / 180)
        return max(0.1, min(1.0, score))
    except (ValueError, TypeError):
        return 0.5


def score_source_quality(model: Optional[str]) -> float:
    """Score source quality based on model/provider."""
    if not model:
        return DEFAULT_SOURCE_QUALITY

    # Normalize model name
    model_lower = model.lower()
    for key, score in SOURCE_QUALITY.items():
        if key in model_lower:
            return score

    return DEFAULT_SOURCE_QUALITY


def score_entry(entry: dict) -> float:
    """
    Score a knowledge entry on quality (0.0-1.0).

    Weights:
    - specificity: 0.3
    - actionability: 0.3
    - freshness: 0.2
    - source_quality: 0.2
    """
    content = entry.get("content", entry.get("text", entry.get("response", "")))
    model = entry.get("model", entry.get("provenance", {}).get("model"))
    timestamp = entry.get("timestamp", entry.get("provenance", {}).get("timestamp"))

    specificity = score_specificity(content)
    actionability = score_actionability(content)
    freshness = score_freshness(timestamp)
    source = score_source_quality(model)

    return round(
        0.3 * specificity +
        0.3 * actionability +
        0.2 * freshness +
        0.2 * source,
        4
    )


def score_entry_detailed(entry: dict) -> dict:
    """Score with breakdown."""
    content = entry.get("content", entry.get("text", entry.get("response", "")))
    model = entry.get("model", entry.get("provenance", {}).get("model"))
    timestamp = entry.get("timestamp", entry.get("provenance", {}).get("timestamp"))

    specificity = score_specificity(content)
    actionability = score_actionability(content)
    freshness = score_freshness(timestamp)
    source = score_source_quality(model)

    return {
        "score": round(0.3 * specificity + 0.3 * actionability + 0.2 * freshness + 0.2 * source, 4),
        "specificity": round(specificity, 4),
        "actionability": round(actionability, 4),
        "freshness": round(freshness, 4),
        "source_quality": round(source, 4),
    }


def filter_entries(entries: List[dict], threshold: float = 0.5) -> List[dict]:
    """Filter entries below quality threshold."""
    filtered = []
    for entry in entries:
        if score_entry(entry) >= threshold:
            filtered.append(entry)
    return filtered


def quality_report(entries: List[dict]) -> str:
    """Generate quality distribution report."""
    if not entries:
        return "No entries to analyze."

    scores = [score_entry(e) for e in entries]

    avg = sum(scores) / len(scores)
    min_score = min(scores)
    max_score = max(scores)

    # Distribution buckets
    buckets = {"high": 0, "medium": 0, "low": 0, "rejected": 0}
    for s in scores:
        if s >= 0.7:
            buckets["high"] += 1
        elif s >= 0.5:
            buckets["medium"] += 1
        elif s >= 0.3:
            buckets["low"] += 1
        else:
            buckets["rejected"] += 1

    lines = [
        "=" * 50,
        " QUALITY GATE REPORT",
        "=" * 50,
        f" Total entries: {len(entries)}",
        f" Average score: {avg:.3f}",
        f" Min: {min_score:.3f}",
        f" Max: {max_score:.3f}",
        "",
        " Distribution:",
    ]

    for bucket, count in buckets.items():
        pct = count / len(entries) * 100
        bar = "█" * int(pct / 5)
        lines.append(f" {bucket:<12} {count:>5} ({pct:>5.1f}%) {bar}")

    passed = buckets["high"] + buckets["medium"]
    lines.append(f"\n Pass rate (>= 0.5): {passed}/{len(entries)} ({passed/len(entries)*100:.1f}%)")
    lines.append("=" * 50)

    return "\n".join(lines)


def main():
    import argparse
    parser = argparse.ArgumentParser(description="Knowledge quality gate")
    parser.add_argument("files", nargs="+", help="JSONL files to score")
    parser.add_argument("--threshold", type=float, default=0.5, help="Quality threshold")
    parser.add_argument("--json", action="store_true", help="JSON output")
    parser.add_argument("--filter", action="store_true", help="Filter entries and report the kept count")
    args = parser.parse_args()

    all_entries = []
    for filepath in args.files:
        with open(filepath) as f:
            for line in f:
                if line.strip():
                    all_entries.append(json.loads(line))

    if args.json:
        results = [{"entry": e, **score_entry_detailed(e)} for e in all_entries]
        print(json.dumps(results, indent=2))
    elif args.filter:
        filtered = filter_entries(all_entries, args.threshold)
        print(f"Kept {len(filtered)}/{len(all_entries)} entries (threshold: {args.threshold})")
    else:
        print(quality_report(all_entries))


if __name__ == "__main__":
    main()
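A quick sanity check of the scorer (a usage sketch; the sample entry below is illustrative, not from the repo's fixtures):

    from quality_gate import score_entry_detailed

    entry = {
        "content": "Run `git bisect start`, then mark the bad commit. Step 1: reproduce the failure.",
        "model": "claude-opus",
        "timestamp": "2025-01-01T00:00:00Z",
    }
    # Prints the composite score plus the four per-dimension scores;
    # backticked commands and numbered steps push specificity and actionability up.
    print(score_entry_detailed(entry))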
scripts/dedup.py (Normal file, +317)
@@ -0,0 +1,317 @@
#!/usr/bin/env python3
"""
dedup.py — Knowledge deduplication: content hash + semantic similarity.

Deduplicates harvested knowledge entries to avoid training on duplicates.
Uses content hashing for exact matches and token overlap for near-duplicates.

Usage:
    python3 dedup.py --input knowledge/index.json --output knowledge/index_deduped.json
    python3 dedup.py --input knowledge/index.json --dry-run
    python3 dedup.py --test  # Run built-in dedup test
"""

import argparse
import hashlib
import json
import re
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import List, Dict, Optional, Tuple


def normalize_text(text: str) -> str:
    """Normalize text for hashing: lowercase, collapse whitespace, strip."""
    text = text.lower().strip()
    text = re.sub(r'\s+', ' ', text)
    return text


def content_hash(text: str) -> str:
    """SHA-256 hash of normalized text for exact dedup."""
    normalized = normalize_text(text)
    return hashlib.sha256(normalized.encode('utf-8')).hexdigest()


def tokenize(text: str) -> set:
    """Simple tokenizer: lowercase words, 3+ chars."""
    words = re.findall(r'[a-z0-9_]{3,}', text.lower())
    return set(words)


def token_similarity(a: str, b: str) -> float:
    """Token-based Jaccard similarity (0.0-1.0).

    Fast local alternative to embedding similarity.
    Good enough for near-duplicate detection.
    """
    tokens_a = tokenize(a)
    tokens_b = tokenize(b)
    if not tokens_a or not tokens_b:
        return 0.0
    intersection = tokens_a & tokens_b
    union = tokens_a | tokens_b
    return len(intersection) / len(union)


def quality_score(fact: dict) -> float:
    """Compute quality score for merge ranking.

    Higher is better. Factors:
    - confidence (0-1)
    - source_count (more confirmations = better)
    - has tags (richer metadata)
    """
    confidence = fact.get('confidence', 0.5)
    source_count = fact.get('source_count', 1)
    has_tags = 1.0 if fact.get('tags') else 0.0
    has_related = 1.0 if fact.get('related') else 0.0

    # Weighted composite
    score = (
        confidence * 0.5 +
        min(source_count / 10, 1.0) * 0.3 +
        has_tags * 0.1 +
        has_related * 0.1
    )
    return round(score, 4)


def merge_facts(keep: dict, drop: dict) -> dict:
    """Merge two near-duplicate facts, keeping higher-quality fields.

    The 'keep' fact is enriched with metadata from 'drop'.
    """
    # Merge tags (union)
    keep_tags = set(keep.get('tags', []))
    drop_tags = set(drop.get('tags', []))
    keep['tags'] = sorted(keep_tags | drop_tags)

    # Merge related (union)
    keep_related = set(keep.get('related', []))
    drop_related = set(drop.get('related', []))
    keep['related'] = sorted(keep_related | drop_related)

    # Update source_count (sum)
    keep['source_count'] = keep.get('source_count', 1) + drop.get('source_count', 1)

    # Update confidence (max — we've now seen it from multiple sources)
    keep['confidence'] = max(keep.get('confidence', 0), drop.get('confidence', 0))

    # Track that we merged
    if '_merged_from' not in keep:
        keep['_merged_from'] = []
    keep['_merged_from'].append(drop.get('id', 'unknown'))

    return keep


def dedup_facts(
    facts: List[dict],
    exact_threshold: float = 1.0,
    near_threshold: float = 0.95,
    dry_run: bool = False,
) -> Tuple[List[dict], dict]:
    """Deduplicate a list of knowledge facts.

    Args:
        facts: List of fact dicts (from index.json)
        exact_threshold: Hash match = exact duplicate
        near_threshold: Token similarity above this = near-duplicate
        dry_run: If True, don't modify, just report

    Returns:
        (deduped_facts, stats_dict)
    """
    if not facts:
        return [], {"total": 0, "exact_dupes": 0, "near_dupes": 0, "unique": 0}

    # Phase 1: Exact dedup by content hash
    hash_seen = {}  # hash -> index in deduped list
    exact_dupes = 0
    deduped = []

    for fact in facts:
        text = fact.get('fact', '')
        h = content_hash(text)

        if h in hash_seen:
            # Exact duplicate — merge metadata into existing
            existing_idx = hash_seen[h]
            if not dry_run:
                deduped[existing_idx] = merge_facts(deduped[existing_idx], fact)
            exact_dupes += 1
        else:
            hash_seen[h] = len(deduped)
            deduped.append(fact)

    # Phase 2: Near-dup by token similarity
    near_dupes = 0
    i = 0
    while i < len(deduped):
        removed_i = False
        j = i + 1
        while j < len(deduped):
            sim = token_similarity(deduped[i].get('fact', ''), deduped[j].get('fact', ''))
            if sim >= near_threshold:
                near_dupes += 1
                # Near-duplicate — keep higher quality
                q_i = quality_score(deduped[i])
                q_j = quality_score(deduped[j])
                if q_i >= q_j:
                    if not dry_run:
                        deduped[i] = merge_facts(deduped[i], deduped[j])
                    deduped.pop(j)
                else:
                    # j is higher quality — merge i into j, then remove i
                    if not dry_run:
                        deduped[j] = merge_facts(deduped[j], deduped[i])
                    deduped.pop(i)
                    removed_i = True
                    break  # the element at index i changed; re-scan this index
            else:
                j += 1
        if not removed_i:
            i += 1

    stats = {
        "total": len(facts),
        "exact_dupes": exact_dupes,
        "near_dupes": near_dupes,
        "unique": len(deduped),
        "removed": len(facts) - len(deduped),
    }

    return deduped, stats


def dedup_index_file(
    input_path: str,
    output_path: Optional[str] = None,
    near_threshold: float = 0.95,
    dry_run: bool = False,
) -> dict:
    """Deduplicate an index.json file.

    Args:
        input_path: Path to index.json
        output_path: Where to write deduped file (default: overwrite input)
        near_threshold: Token similarity threshold for near-dupes
        dry_run: Report only, don't write

    Returns stats dict.
    """
    path = Path(input_path)
    if not path.exists():
        raise FileNotFoundError(f"Index file not found: {input_path}")

    with open(path) as f:
        data = json.load(f)

    facts = data.get('facts', [])
    deduped, stats = dedup_facts(facts, near_threshold=near_threshold, dry_run=dry_run)

    if not dry_run:
        data['facts'] = deduped
        data['total_facts'] = len(deduped)
        data['last_dedup'] = datetime.now(timezone.utc).isoformat()

        out_path = Path(output_path) if output_path else path
        with open(out_path, 'w') as f:
            json.dump(data, f, indent=2, ensure_ascii=False)

    return stats


def generate_test_duplicates(n: int = 20) -> List[dict]:
    """Generate test facts with intentional duplicates for testing.

    Creates n unique facts plus n/4 exact dupes and n/4 near-dupes.
    """
    import random
    random.seed(42)

    unique_facts = []
    for i in range(n):
        topic = random.choice(["git", "python", "docker", "rust", "nginx"])
        tip = random.choice(["use verbose flags", "check logs first", "restart service", "clear cache", "update config"])
        unique_facts.append({
            "id": f"test:fact:{i:03d}",
            "fact": f"When working with {topic}, always {tip} before deploying.",
            "category": "fact",
            "domain": "test",
            "confidence": round(random.uniform(0.5, 1.0), 2),
            "source_count": random.randint(1, 5),
            "tags": [topic, "test"],
        })

    # Add exact duplicates (same text, different IDs)
    duped = list(unique_facts)
    for i in range(n // 4):
        original = unique_facts[i]
        dupe = dict(original)
        dupe["id"] = f"test:fact:dup{i:03d}"
        dupe["confidence"] = round(random.uniform(0.3, 0.8), 2)
        duped.append(dupe)

    # Add near-duplicates (slightly different phrasing)
    for i in range(n // 4):
        original = unique_facts[i]
        near = dict(original)
        near["id"] = f"test:fact:near{i:03d}"
        near["fact"] = original["fact"].replace("always", "should").replace("before deploying", "prior to deployment")
        near["confidence"] = round(random.uniform(0.4, 0.9), 2)
        duped.append(near)

    return duped


def main():
    parser = argparse.ArgumentParser(description="Knowledge deduplication")
    parser.add_argument("--input", help="Path to index.json")
    parser.add_argument("--output", help="Output path (default: overwrite input)")
    parser.add_argument("--threshold", type=float, default=0.95,
                        help="Near-dup similarity threshold (default: 0.95)")
    parser.add_argument("--dry-run", action="store_true", help="Report only, don't write")
    parser.add_argument("--test", action="store_true", help="Run built-in dedup test")
    parser.add_argument("--json", action="store_true", help="JSON output")
    args = parser.parse_args()

    if args.test:
        test_facts = generate_test_duplicates(20)
        print(f"Generated {len(test_facts)} test facts (20 unique + dupes)")
        deduped, stats = dedup_facts(test_facts, near_threshold=args.threshold)
        print("\nDedup results:")
        print(f"  Total input:   {stats['total']}")
        print(f"  Exact dupes:   {stats['exact_dupes']}")
        print(f"  Near dupes:    {stats['near_dupes']}")
        print(f"  Unique output: {stats['unique']}")
        print(f"  Removed:       {stats['removed']}")

        # Verify: should end with at most 20 unique (some merged)
        assert stats['unique'] <= 20, f"Too many unique: {stats['unique']} > 20"
        assert stats['unique'] >= 15, f"Too few unique: {stats['unique']} < 15"
        assert stats['removed'] > 0, "No duplicates removed"
        print("\nOK: Dedup test passed")
        return

    if not args.input:
        print("ERROR: Provide --input or --test")
        sys.exit(1)

    stats = dedup_index_file(args.input, args.output, args.threshold, args.dry_run)

    if args.json:
        print(json.dumps(stats, indent=2))
    else:
        print("Dedup results:")
        print(f"  Total input:   {stats['total']}")
        print(f"  Exact dupes:   {stats['exact_dupes']}")
        print(f"  Near dupes:    {stats['near_dupes']}")
        print(f"  Unique output: {stats['unique']}")
        print(f"  Removed:       {stats['removed']}")
        if args.dry_run:
            print("  (dry run — no changes written)")


if __name__ == "__main__":
    main()
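A minimal sketch of the two dedup phases, assuming the module is importable as dedup (its on-disk name above):

    from dedup import content_hash, token_similarity

    a = "When working with git, always check logs first before deploying."
    b = "When working with git, should check logs first prior to deployment."

    # Phase 1 catches byte-identical facts after normalization:
    print(content_hash(a) == content_hash(a.upper() + "  "))  # True

    # Phase 2 scores near-duplicates by Jaccard token overlap; even small
    # phrasing changes lower the score, so 0.95 is a conservative threshold.
    print(token_similarity(a, b))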
@@ -149,8 +149,8 @@ def to_dot(graph: dict) -> str:
     """Generate DOT format output."""
     lines = ["digraph dependencies {"]
     lines.append("  rankdir=LR;")
-    lines.append("  node [shape=box, style=filled, fillcolor="#1a1a2e", fontcolor="#e6edf3"];")
-    lines.append("  edge [color="#4a4a6a"];")
+    lines.append('  node [shape=box, style=filled, fillcolor="#1a1a2e", fontcolor="#e6edf3"];')
+    lines.append('  edge [color="#4a4a6a"];')
     lines.append("")

     for repo, data in sorted(graph.items()):
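For context, the removed lines fail at parse time: the inner double quotes terminate the outer string literal. Switching the outer quotes (as the fix does) or escaping the inner ones both produce the same string:

    # Equivalent ways to embed double quotes in a DOT attribute string:
    line1 = '  edge [color="#4a4a6a"];'
    line2 = "  edge [color=\"#4a4a6a\"];"
    assert line1 == line2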
scripts/freshness.py (Normal file, +387)
@@ -0,0 +1,387 @@
#!/usr/bin/env python3
"""
Knowledge Freshness Cron — Detect stale entries from code changes (Issue #200)

Automatically detects when knowledge entries become stale due to code changes.

Detection Method:
1. Track source file hash alongside knowledge entry
2. Compare current file hashes vs stored
3. Mismatch → flag entry as potentially stale
4. Report stale entries and optionally re-extract

Usage:
    python3 scripts/freshness.py --knowledge-dir knowledge/
    python3 scripts/freshness.py --knowledge-dir knowledge/ --json
    python3 scripts/freshness.py --knowledge-dir knowledge/ --repo /path/to/repo
    python3 scripts/freshness.py --knowledge-dir knowledge/ --auto-reextract
"""

import argparse
import hashlib
import json
import os
import subprocess
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Any, Optional, Tuple

import yaml  # third-party: PyYAML


def compute_file_hash(filepath: str) -> Optional[str]:
    """Compute SHA-256 hash of a file. Returns None if file doesn't exist."""
    try:
        with open(filepath, "rb") as f:
            return "sha256:" + hashlib.sha256(f.read()).hexdigest()
    except (FileNotFoundError, IsADirectoryError, PermissionError):
        return None


def get_git_file_changes(repo_path: str, days: int = 1) -> Dict[str, List[str]]:
    """
    Get files changed in git in the last N days.

    Returns dict with 'modified', 'added', 'deleted' lists of file paths.
    """
    changes = {"modified": [], "added": [], "deleted": []}

    try:
        # Get commits from last N days
        cmd = [
            "git", "-C", repo_path, "log",
            f"--since={days} days ago",
            "--name-status",
            "--pretty=format:",
            "--diff-filter=MAD"
        ]
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)

        if result.returncode != 0:
            return changes

        for line in result.stdout.splitlines():
            line = line.strip()
            if not line:
                continue

            parts = line.split('\t', 1)
            if len(parts) != 2:
                continue

            status, filepath = parts
            if status == 'M':
                changes["modified"].append(filepath)
            elif status == 'A':
                changes["added"].append(filepath)
            elif status == 'D':
                changes["deleted"].append(filepath)

    except (subprocess.TimeoutExpired, FileNotFoundError):
        pass

    # Deduplicate
    for key in changes:
        changes[key] = list(set(changes[key]))

    return changes


def load_knowledge_entries(knowledge_dir: str) -> List[Dict[str, Any]]:
    """
    Load knowledge entries from YAML files in the knowledge directory.

    Supports:
    - knowledge/index.json (legacy format)
    - knowledge/global/*.yaml
    - knowledge/repos/*.yaml
    - knowledge/agents/*.yaml
    """
    entries = []

    # Load from index.json if it exists
    index_path = os.path.join(knowledge_dir, "index.json")
    if os.path.exists(index_path):
        try:
            with open(index_path) as f:
                data = json.load(f)
            for fact in data.get("facts", []):
                entries.append({
                    "source": "index.json",
                    "fact": fact.get("fact", ""),
                    "source_file": fact.get("source_file"),
                    "source_hash": fact.get("source_hash"),
                    "category": fact.get("category", "unknown"),
                    "confidence": fact.get("confidence", 0.5)
                })
        except (json.JSONDecodeError, KeyError):
            pass

    # Load from YAML files
    for subdir in ["global", "repos", "agents"]:
        subdir_path = os.path.join(knowledge_dir, subdir)
        if not os.path.isdir(subdir_path):
            continue

        for filename in os.listdir(subdir_path):
            if not filename.endswith((".yaml", ".yml")):
                continue

            filepath = os.path.join(subdir_path, filename)
            try:
                with open(filepath) as f:
                    data = yaml.safe_load(f)

                if not data or not isinstance(data, dict):
                    continue

                # Extract entries from YAML structure
                for key, value in data.items():
                    if isinstance(value, list):
                        for item in value:
                            if isinstance(item, dict):
                                entries.append({
                                    "source": f"{subdir}/{filename}",
                                    "fact": item.get("description", item.get("fact", "")),
                                    "source_file": item.get("source_file"),
                                    "source_hash": item.get("source_hash"),
                                    "category": item.get("category", "unknown"),
                                    "confidence": item.get("confidence", 0.5)
                                })
                    elif isinstance(value, dict):
                        entries.append({
                            "source": f"{subdir}/{filename}",
                            "fact": value.get("description", value.get("fact", "")),
                            "source_file": value.get("source_file"),
                            "source_hash": value.get("source_hash"),
                            "category": value.get("category", "unknown"),
                            "confidence": value.get("confidence", 0.5)
                        })
            except (yaml.YAMLError, IOError):
                pass

    return entries


def check_freshness(knowledge_dir: str, repo_root: str = ".",
                    days: int = 1) -> Dict[str, Any]:
    """
    Check freshness of knowledge entries against recent code changes.

    Returns:
        {
            "timestamp": ISO timestamp,
            "total_entries": int,
            "stale_entries": [...],
            "fresh_entries": [...],
            "git_changes": {...},
            "summary": {...}
        }
    """
    entries = load_knowledge_entries(knowledge_dir)
    git_changes = get_git_file_changes(repo_root, days)

    stale_entries = []
    fresh_entries = []

    for entry in entries:
        source_file = entry.get("source_file")
        if not source_file:
            # Entry without source file reference
            fresh_entries.append({**entry, "status": "no_source"})
            continue

        # Check if source file was recently modified
        is_stale = False
        reason = ""

        if source_file in git_changes["modified"]:
            is_stale = True
            reason = "source_modified"
        elif source_file in git_changes["deleted"]:
            is_stale = True
            reason = "source_deleted"
        elif source_file in git_changes["added"]:
            is_stale = True
            reason = "source_added"

        # Also check hash if available
        stored_hash = entry.get("source_hash")
        if stored_hash:
            full_path = os.path.join(repo_root, source_file)
            current_hash = compute_file_hash(full_path)

            if current_hash is None:
                is_stale = True
                reason = "source_missing"
            elif current_hash != stored_hash:
                is_stale = True
                reason = "hash_mismatch"

        if is_stale:
            stale_entries.append({
                **entry,
                "status": "stale",
                "reason": reason
            })
        else:
            fresh_entries.append({**entry, "status": "fresh"})

    # Compute summary
    total = len(entries)
    stale_count = len(stale_entries)
    fresh_count = len(fresh_entries)

    # Group stale entries by reason
    stale_by_reason = {}
    for entry in stale_entries:
        reason = entry.get("reason", "unknown")
        if reason not in stale_by_reason:
            stale_by_reason[reason] = 0
        stale_by_reason[reason] += 1

    return {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "total_entries": total,
        "stale_entries": stale_entries,
        "fresh_entries": fresh_entries,
        "git_changes": git_changes,
        "summary": {
            "total": total,
            "stale": stale_count,
            "fresh": fresh_count,
            "stale_percentage": round(stale_count / total * 100, 1) if total > 0 else 0,
            "stale_by_reason": stale_by_reason,
            "git_changes_summary": {
                "modified": len(git_changes["modified"]),
                "added": len(git_changes["added"]),
                "deleted": len(git_changes["deleted"])
            }
        }
    }


def update_stale_hashes(knowledge_dir: str, repo_root: str = ".") -> int:
    """
    Update hashes for stale entries. Returns count of updated entries.
    """
    entries = load_knowledge_entries(knowledge_dir)
    updated = 0

    # This is a simplified version - in practice, you'd need to
    # write back to the specific YAML files
    for entry in entries:
        source_file = entry.get("source_file")
        if not source_file:
            continue

        full_path = os.path.join(repo_root, source_file)
        current_hash = compute_file_hash(full_path)

        if current_hash and entry.get("source_hash") != current_hash:
            # Mark for update (in practice, you'd write back to the file)
            updated += 1

    return updated


def format_report(result: Dict[str, Any], max_items: int = 20) -> str:
    """Format freshness check results as a human-readable report."""
    timestamp = result["timestamp"]
    summary = result["summary"]
    stale_entries = result["stale_entries"]
    git_changes = result["git_changes"]

    lines = [
        "Knowledge Freshness Report",
        "=" * 50,
        f"Generated: {timestamp}",
        f"Total entries: {summary['total']}",
        f"Stale entries: {summary['stale']} ({summary['stale_percentage']}%)",
        f"Fresh entries: {summary['fresh']}",
        ""
    ]

    # Git changes summary
    lines.extend([
        "Git Changes (last 24h):",
        f"  Modified: {len(git_changes['modified'])} files",
        f"  Added:    {len(git_changes['added'])} files",
        f"  Deleted:  {len(git_changes['deleted'])} files",
        ""
    ])

    # Stale entries by reason
    if summary.get("stale_by_reason"):
        lines.extend([
            "Stale Entries by Reason:",
            ""
        ])
        for reason, count in summary["stale_by_reason"].items():
            lines.append(f"  {reason}: {count}")
        lines.append("")

    # List stale entries
    if stale_entries:
        lines.extend([
            "Stale Entries:",
            ""
        ])
        for i, entry in enumerate(stale_entries[:max_items], 1):
            source = entry.get("source_file", "?")
            reason = entry.get("reason", "unknown")
            fact = entry.get("fact", "")[:60]
            lines.append(f"{i:2d}. [{reason}] {source}")
            if fact:
                lines.append(f"    {fact}")

        if len(stale_entries) > max_items:
            lines.append(f"\n... and {len(stale_entries) - max_items} more")
    else:
        lines.append("No stale entries found. All knowledge is fresh!")

    return "\n".join(lines)


def main():
    parser = argparse.ArgumentParser(
        description="Knowledge Freshness Cron — detect stale entries from code changes")
    parser.add_argument("--knowledge-dir", required=True,
                        help="Path to knowledge directory")
    parser.add_argument("--repo", default=".",
                        help="Path to repository for git change detection")
    parser.add_argument("--days", type=int, default=1,
                        help="Number of days to check for git changes (default: 1)")
    parser.add_argument("--json", action="store_true",
                        help="Output as JSON instead of human-readable")
    parser.add_argument("--max", type=int, default=20,
                        help="Maximum stale entries to show (default: 20)")
    parser.add_argument("--auto-reextract", action="store_true",
                        help="Auto-re-extract knowledge for stale entries")

    args = parser.parse_args()

    if not os.path.isdir(args.knowledge_dir):
        print(f"Error: {args.knowledge_dir} is not a directory", file=sys.stderr)
        sys.exit(1)

    if not os.path.isdir(args.repo):
        print(f"Error: {args.repo} is not a directory", file=sys.stderr)
        sys.exit(1)

    result = check_freshness(args.knowledge_dir, args.repo, args.days)

    if args.json:
        print(json.dumps(result, indent=2))
    else:
        print(format_report(result, args.max))

    # Auto-re-extract if requested
    if args.auto_reextract and result["stale_entries"]:
        print(f"\nAuto-re-extracting {len(result['stale_entries'])} stale entries...")
        # In a real implementation, this would call the harvester
        print("(Auto-re-extraction not yet implemented)")


if __name__ == "__main__":
    main()
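A sketch of the hash check in isolation (assuming scripts/ is on the import path; the entry literal is illustrative, and source_hash would normally be recorded at harvest time):

    from freshness import compute_file_hash

    entry = {
        "fact": "dedup merges near-duplicates by Jaccard similarity",
        "source_file": "scripts/dedup.py",
        "source_hash": "sha256:0000",  # deliberately wrong, to force staleness in this demo
    }
    current = compute_file_hash(entry["source_file"])
    if current is None:
        print("stale: source_missing")
    elif current != entry["source_hash"]:
        print("stale: hash_mismatch")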
@@ -113,7 +113,7 @@ def find_slow_tests_by_scan(repo_path: str) -> List[Bottleneck]:
         (r"time\.sleep\((\d+(?:\.\d+)?)\)", "Contains time.sleep() — consider using mock or async wait"),
         (r"subprocess\.run\(.*timeout=(\d+)", "Subprocess with timeout — may block test"),
         (r"requests\.(get|post|put|delete)\(", "Real HTTP call — mock with responses or httpretty"),
-        (r"open\([^)]*['"]w['"]", "File I/O in test — use tmp_path fixture"),
+        (r"open\([^)]*[\x27\x22]w[\x27\x22]", "File I/O in test — use tmp_path fixture"),
     ]

     for root, dirs, files in os.walk(repo_path):
@@ -506,8 +506,8 @@ def format_markdown(report: PerfReport) -> str:
         lines.append(f"- {icon} {b.name}{loc} — ~{b.duration_s:.1f}s — {b.recommendation}")
         lines.append(f"")

-    return "
-".join(lines)
+    return "\n".join(lines)


 # ── Main ───────────────────────────────────────────────────────────
@@ -521,8 +521,8 @@ def main():
                         help="Slow test threshold in seconds")
     args = parser.parse_args()

-    global SLOW_TEST_THRESHOLD_S
-    SLOW_TEST_THRESHOLD_S = args.threshold
+    # Threshold override handled via module-level default
+    # (scan_tests uses SLOW_TEST_THRESHOLD_S from module scope)

     if not os.path.isdir(args.repo):
         print(f"Error: {args.repo} is not a directory", file=sys.stderr)
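On the open() pattern fix in the first hunk above: \x27 and \x22 are hex escapes for the single and double quote characters, which lets the character class live inside a double-quoted string literal without nested-quote breakage. A small check:

    import re

    # [\x27\x22] is the same class as ['"]: either quote character.
    pattern = re.compile(r"open\([^)]*[\x27\x22]w[\x27\x22]")
    assert pattern.search('fh = open("out.txt", "w")')
    assert pattern.search("fh = open('out.txt', 'w')")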
@@ -10,37 +10,273 @@ Usage:
 """

 import argparse
 import ast
 import json
 import os
 import sys
 from dataclasses import dataclass, field
 from datetime import datetime, timezone
 from typing import Optional, Tuple


-def generate_proposals():
-    """Generate sample proposals for this engine."""
-    # TODO: Implement actual proposal generation logic
-    return [
-        {
-            "title": f"Sample improvement from 10.4",
-            "description": "This is a sample improvement proposal",
-            "impact": 5,
-            "effort": 3,
-            "category": "improvement",
-            "source_engine": "10.4",
-            "timestamp": datetime.now(timezone.utc).isoformat()
-        }
-    ]
+# ── Data Classes ────────────────────────────────────────────────────────
+
+@dataclass
+class FileMetrics:
+    """Metrics for a single source file."""
+    path: str
+    lines: int = 0
+    complexity: float = 0.0
+    max_complexity: int = 0
+    functions: int = 0
+    classes: int = 0
+    churn_30d: int = 0
+    churn_90d: int = 0
+    test_coverage: Optional[float] = None
+    refactoring_score: float = 0.0
+
+
+# ── Complexity Analysis ─────────────────────────────────────────────────
+
+class ComplexityVisitor(ast.NodeVisitor):
+    """AST visitor that computes cyclomatic complexity per function."""
+
+    def __init__(self):
+        self.complexities = []
+        self.function_count = 0
+        self.class_count = 0
+        self._current_complexity = 0
+        self._in_function = False
+
+    def visit_FunctionDef(self, node):
+        self.function_count += 1
+        old_complexity = self._current_complexity
+        old_in_function = self._in_function
+        self._current_complexity = 1  # Base complexity
+        self._in_function = True
+
+        self.generic_visit(node)
+
+        self.complexities.append(self._current_complexity)
+        self._current_complexity = old_complexity
+        self._in_function = old_in_function
+
+    visit_AsyncFunctionDef = visit_FunctionDef
+
+    def visit_ClassDef(self, node):
+        self.class_count += 1
+        self.generic_visit(node)
+
+    def visit_If(self, node):
+        if self._in_function:
+            self._current_complexity += 1
+        self.generic_visit(node)
+
+    def visit_For(self, node):
+        if self._in_function:
+            self._current_complexity += 1
+        self.generic_visit(node)
+
+    visit_AsyncFor = visit_For
+
+    def visit_While(self, node):
+        if self._in_function:
+            self._current_complexity += 1
+        self.generic_visit(node)
+
+    def visit_ExceptHandler(self, node):
+        if self._in_function:
+            self._current_complexity += 1
+        self.generic_visit(node)
+
+    def visit_With(self, node):
+        if self._in_function:
+            self._current_complexity += 1
+        self.generic_visit(node)
+
+    visit_AsyncWith = visit_With
+
+    def visit_Assert(self, node):
+        if self._in_function:
+            self._current_complexity += 1
+        self.generic_visit(node)
+
+    def visit_BoolOp(self, node):
+        # Each 'and'/'or' adds a branch
+        if self._in_function:
+            self._current_complexity += len(node.values) - 1
+        self.generic_visit(node)
+
+    def visit_IfExp(self, node):
+        # Ternary expression
+        if self._in_function:
+            self._current_complexity += 1
+        self.generic_visit(node)
+
+
+def compute_file_complexity(filepath: str) -> Tuple[float, int, int, int, int]:
+    """
+    Compute cyclomatic complexity for a Python file.
+
+    Returns:
+        (avg_complexity, max_complexity, function_count, class_count, line_count)
+    """
+    try:
+        with open(filepath, 'r', encoding='utf-8', errors='replace') as f:
+            source = f.read()
+    except (IOError, OSError):
+        return 0.0, 0, 0, 0, 0
+
+    try:
+        tree = ast.parse(source, filename=filepath)
+    except SyntaxError:
+        return 0.0, 0, 0, 0, 0
+
+    visitor = ComplexityVisitor()
+    visitor.visit(tree)
+
+    line_count = len(source.splitlines())
+
+    if not visitor.complexities:
+        # No functions, but might have classes
+        return 0.0, 0, visitor.function_count, visitor.class_count, line_count
+
+    avg = sum(visitor.complexities) / len(visitor.complexities)
+    max_c = max(visitor.complexities)
+
+    return avg, max_c, visitor.function_count, visitor.class_count, line_count
+
+
+# ── Refactoring Score ───────────────────────────────────────────────────
+
+def calculate_refactoring_score(metrics: FileMetrics) -> float:
+    """
+    Calculate a refactoring priority score (0-100) based on file metrics.
+
+    Higher score = higher priority for refactoring.
+    Components:
+    - Complexity (0-30 points): higher avg/max complexity = higher score
+    - Size (0-20 points): larger files = higher score
+    - Churn (0-30 points): more changes recently = higher score
+    - Coverage (0-20 points): lower test coverage = higher score
+    """
+    score = 0.0
+
+    # Complexity component (0-30)
+    # avg=10+ or max=20+ → 30 points
+    complexity_score = min(30.0, (metrics.complexity * 2) + (metrics.max_complexity * 0.5))
+    score += max(0.0, complexity_score)
+
+    # Size component (0-20)
+    # 500+ lines → 20 points
+    size_score = min(20.0, metrics.lines / 25.0)
+    score += max(0.0, size_score)
+
+    # Churn component (0-30)
+    # Weighted: recent churn (30d) counts more than older (90d)
+    churn_score = min(30.0, (metrics.churn_30d * 2) + (metrics.churn_90d * 0.5))
+    score += max(0.0, churn_score)
+
+    # Coverage component (0-20)
+    # Lower coverage → higher score
+    if metrics.test_coverage is not None:
+        # coverage=0 → 20 points, coverage=1 → 0 points
+        coverage_score = (1.0 - metrics.test_coverage) * 20.0
+    else:
+        # No data → assume medium risk (10 points)
+        coverage_score = 10.0
+    score += max(0.0, coverage_score)
+
+    return min(100.0, max(0.0, score))
+
+
+# ── Proposal Generation ─────────────────────────────────────────────────
+
+def scan_directory(directory: str, extensions: tuple = ('.py',)) -> list:
+    """Scan directory for source files."""
+    files = []
+    for root, dirs, filenames in os.walk(directory):
+        # Skip hidden dirs and common non-source dirs
+        dirs[:] = [d for d in dirs if not d.startswith('.') and d not in (
+            '__pycache__', 'node_modules', 'venv', '.venv', 'env',
+            'build', 'dist', '.git', '.tox'
+        )]
+        for fname in filenames:
+            if any(fname.endswith(ext) for ext in extensions):
+                files.append(os.path.join(root, fname))
+    return files
+
+
+def generate_proposals(directory: str = '.', min_score: float = 30.0) -> list:
+    """Generate refactoring proposals by analyzing source files."""
+    proposals = []
+    files = scan_directory(directory)
+
+    for filepath in files:
+        avg, max_c, funcs, classes, lines = compute_file_complexity(filepath)
+
+        if funcs == 0 and classes == 0:
+            continue
+
+        metrics = FileMetrics(
+            path=filepath,
+            lines=lines,
+            complexity=avg,
+            max_complexity=max_c,
+            functions=funcs,
+            classes=classes
+        )
+        score = calculate_refactoring_score(metrics)
+        metrics.refactoring_score = score
+
+        if score >= min_score:
+            reasons = []
+            if max_c > 10:
+                reasons.append(f"high max complexity ({max_c})")
+            if avg > 5:
+                reasons.append(f"high avg complexity ({avg:.1f})")
+            if lines > 300:
+                reasons.append(f"large file ({lines} lines)")
+
+            proposals.append({
+                "title": f"Refactor {os.path.basename(filepath)} (score: {score:.0f})",
+                "description": f"{filepath}: {', '.join(reasons) if reasons else 'general improvement candidate'}",
+                "impact": min(10, int(score / 10)),
+                "effort": min(10, max(1, int(max_c / 3))),
+                "category": "refactoring",
+                "source_engine": "10.4",
+                "timestamp": datetime.now(timezone.utc).isoformat(),
+                "metrics": {
+                    "path": filepath,
+                    "score": round(score, 2),
+                    "avg_complexity": round(avg, 2),
+                    "max_complexity": max_c,
+                    "lines": lines,
+                    "functions": funcs,
+                    "classes": classes
+                }
+            })
+
+    # Sort by score descending
+    proposals.sort(key=lambda p: p.get('metrics', {}).get('score', 0), reverse=True)
+    return proposals
+
+
+# ── CLI ─────────────────────────────────────────────────────────────────
+
 def main():
     parser = argparse.ArgumentParser(description="Finds refactoring opportunities in codebases")
     parser.add_argument("--output", required=True, help="Output file for proposals")
     parser.add_argument("--dry-run", action="store_true", help="Don't write output file")
+    parser.add_argument("--directory", default=".", help="Directory to scan")
+    parser.add_argument("--min-score", type=float, default=30.0, help="Minimum score threshold")

     args = parser.parse_args()

-    proposals = generate_proposals()
+    proposals = generate_proposals(args.directory, args.min_score)

     if not args.dry_run:
         os.makedirs(os.path.dirname(args.output) or '.', exist_ok=True)
         with open(args.output, "w") as f:
             json.dump({"proposals": proposals}, f, indent=2)
         print(f"Generated {len(proposals)} proposals -> {args.output}")
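A quick sketch of the visitor on an inline function (the diff does not show this file's path, so the module name below is hypothetical):

    import ast
    from engine_10_4 import ComplexityVisitor  # hypothetical module name; not shown in the diff

    visitor = ComplexityVisitor()
    visitor.visit(ast.parse("def f(x):\n    if x > 0 and x < 10:\n        return x\n    return -x\n"))
    # Base 1, +1 for the `if`, +1 for the `and`:
    print(visitor.complexities)  # [3]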
scripts/review_comment_generator.py (Executable file, +185)
@@ -0,0 +1,185 @@
#!/usr/bin/env python3
"""
Review Comment Generator — Issue #126
Reads JSONL findings, deduplicates, posts as Gitea PR comments.
"""

from __future__ import annotations

import argparse
import hashlib
import json
import os
import sys
import urllib.request
import urllib.error
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Optional

SCRIPT_DIR = Path(__file__).resolve().parent
REPO_ROOT = SCRIPT_DIR.parent

DEFAULT_API_BASE = os.environ.get(
    "GITEA_API_BASE",
    "https://forge.alexanderwhitestone.com"
)
TOKEN_PATHS = [
    os.path.expanduser("~/.config/gitea/token"),
    os.path.expanduser("~/.hermes/gitea.token"),
    os.environ.get("GITEA_TOKEN", ""),
]


def load_token() -> Optional[str]:
    token = os.environ.get("GITEA_TOKEN", "")
    if token:
        return token
    for path in TOKEN_PATHS:
        if path and os.path.exists(path):
            with open(path) as f:
                t = f.read().strip()
                if t:
                    return t
    return None


class GiteaClient:
    def __init__(self, base_url: str, token: str, org: str, repo: str):
        self.base_url = base_url.rstrip("/")
        self.token = token
        self.org = org
        self.repo = repo

    def _post(self, path: str, data: Dict) -> Optional[Dict]:
        url = f"{self.base_url}/api/v1{path}"
        body = json.dumps(data).encode("utf-8")
        req = urllib.request.Request(url, data=body, method="POST")
        req.add_header("Authorization", f"token {self.token}")
        req.add_header("Content-Type", "application/json")
        try:
            with urllib.request.urlopen(req, timeout=30) as resp:
                return json.loads(resp.read().decode())
        except urllib.error.HTTPError as e:
            raw = e.read()  # read once; a second read() would return b""
            err = raw.decode() if raw else str(e)
            print(f"[ERROR] HTTP {e.code}: {err}", file=sys.stderr)
            return None
        except Exception as e:
            print(f"[ERROR] {e}", file=sys.stderr)
            return None

    def post_issue_comment(self, issue_num: int, body: str) -> Optional[Dict]:
        return self._post(
            f"/repos/{self.org}/{self.repo}/issues/{issue_num}/comments",
            {"body": body}
        )


def content_hash(finding: Dict) -> str:
    key = f"{finding['file']}:{finding['line']}:{finding['text']}"
    return hashlib.sha256(key.encode("utf-8")).hexdigest()


def format_comment(finding: Dict) -> str:
    emoji = {
        "error": "🛑",
        "warning": "⚠️",
        "info": "ℹ️",
    }.get(finding.get("severity", ""), "📝")
    f = finding["file"]
    ln = finding["line"]
    txt = finding["text"]
    return f"{emoji} **Review Comment**\n\nFile: `{f}`\nLine: {ln}\n\n> {txt}\n"


def load_findings(path: Optional[Path], from_stdin: bool) -> List[Dict]:
    import fileinput
    findings = []
    sources = ["-"] if from_stdin else [str(path)]
    for line in fileinput.input(files=sources):
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        try:
            f = json.loads(line)
            for key in ("file", "line", "text"):
                if key not in f:
                    raise ValueError(f"Missing key: {key}")
            findings.append(f)
        except json.JSONDecodeError as e:
            print(f"WARNING: Skipping invalid JSON: {e}", file=sys.stderr)
    return findings


def main() -> int:
    parser = argparse.ArgumentParser(
        description="Post review findings as comments to a Gitea PR/issue"
    )
    parser.add_argument("--pr", type=int, required=True, help="PR/issue number")
    parser.add_argument("--org", default="Timmy_Foundation", help="Gitea org")
    parser.add_argument("--repo", default="compounding-intelligence", help="Repo name")
    parser.add_argument("--api-base", default=DEFAULT_API_BASE, help="Gitea API base")
    parser.add_argument("--token", default=None, help="API token (or env/file)")
    parser.add_argument("--input", type=Path, default=None, help="JSONL input file")
    parser.add_argument("--stdin", action="store_true", help="Read from stdin")
    parser.add_argument("--dry-run", action="store_true", help="Show without posting")
    parser.add_argument("--json", action="store_true", help="Emit JSON report")

    args = parser.parse_args()

    if not args.stdin and args.input is None:
        print("ERROR: --input or --stdin required", file=sys.stderr)
        return 1
    if args.stdin and args.input:
        print("ERROR: --stdin and --input are mutually exclusive", file=sys.stderr)
        return 1

    token = args.token or load_token()
    if not token:
        print("ERROR: Token not found. Set GITEA_TOKEN or ~/.config/gitea/token", file=sys.stderr)
        return 1

    findings = load_findings(args.input, args.stdin)
    if not findings:
        print("ERROR: No findings loaded", file=sys.stderr)
        return 1

    if not args.json:
        print(f"Loaded {len(findings)} finding(s)")

    seen: Dict[str, Dict] = {}
    for f in findings:
        h = content_hash(f)
        if h not in seen:
            seen[h] = f

    unique = list(seen.values())
    if not args.json:
        print(f"After dedup: {len(unique)} unique")

    if args.json:
        report = {
            "total": len(findings),
            "unique": len(unique),
            "findings": unique,
            "generated_at": datetime.now(timezone.utc).isoformat(),
        }
        print(json.dumps(report, indent=2))
        return 0

    if args.dry_run:
        print("\n=== DRY RUN — would post ===")
        for i, f in enumerate(unique, 1):
            print(f"\n--- Comment {i}/{len(unique)} ---")
            print(format_comment(f))
        return 0

    client = GiteaClient(args.api_base, token, args.org, args.repo)
    posted = 0
    for f in unique:
        body = format_comment(f)
        result = client.post_issue_comment(args.pr, body)
        if result:
            print(f"✅ Posted: {f['file']}:{f['line']} (id={result.get('id')})")
            posted += 1
        else:
            print(f"❌ Failed: {f['file']}:{f['line']}")

    print(f"\nPosted {posted}/{len(unique)} to PR #{args.pr}")
    return 0 if posted == len(unique) else 1


if __name__ == "__main__":
    sys.exit(main())
scripts/sample_findings.jsonl (Normal file, +5)
@@ -0,0 +1,5 @@
{"file": "scripts/harvester.py", "line": 47, "text": "Consider adding type hints to improve readability", "severity": "info"}
{"file": "scripts/dedup.py", "line": 89, "text": "Add null check before accessing fact['confidence'] to avoid KeyError", "severity": "warning"}
{"file": "scripts/bootstrapper.py", "line": 102, "text": "This loop is O(n^2) — could be optimized with a dict lookup", "severity": "info"}
{"file": "scripts/harvester.py", "line": 47, "text": "Consider adding type hints to improve readability", "severity": "info"}
{"file": "scripts/harvester.py", "line": 120, "text": "File handle not closed in error path — use context manager", "severity": "error"}
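A dry run over these samples (a usage sketch; note that the fourth line duplicates the first byte-for-byte, so the hash dedup should drop it):

    python3 scripts/review_comment_generator.py \
        --pr 126 --input scripts/sample_findings.jsonl --dry-run

Per the script's own prints, this should report 5 findings loaded and 4 unique, then show each formatted comment without posting anything.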
@@ -1,212 +1,72 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Comprehensive test script for knowledge extraction prompt.
|
||||
Validates prompt structure, requirements, and consistency.
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
"""Comprehensive tests for knowledge extraction prompt."""
|
||||
import json, re
|
||||
from pathlib import Path
|
||||
|
||||
def test_prompt_structure():
|
||||
"""Test that the prompt has the required structure."""
|
||||
prompt_path = Path("templates/harvest-prompt.md")
|
||||
if not prompt_path.exists():
|
||||
return False, "harvest-prompt.md not found"
|
||||
|
||||
content = prompt_path.read_text()
|
||||
|
||||
# Check for required sections
|
||||
required_sections = [
|
||||
"System Prompt",
|
||||
"Instructions",
|
||||
"Categories",
|
||||
"Output Format",
|
||||
"Confidence Scoring",
|
||||
"Constraints",
|
||||
"Example"
|
||||
]
|
||||
|
||||
for section in required_sections:
|
||||
if section.lower() not in content.lower():
|
||||
return False, f"Missing required section: {section}"
|
||||
|
||||
# Check for required categories
|
||||
required_categories = ["fact", "pitfall", "pattern", "tool-quirk", "question"]
|
||||
for category in required_categories:
|
||||
if category not in content:
|
||||
return False, f"Missing required category: {category}"
|
||||
|
||||
# Check for required output fields
|
||||
required_fields = ["fact", "category", "repo", "confidence"]
|
||||
for field in required_fields:
|
||||
if field not in content:
|
||||
return False, f"Missing required output field: {field}"
|
||||
|
||||
# Check prompt size (should be ~1k tokens, roughly 4k chars)
|
||||
if len(content) > 5000:
|
||||
return False, f"Prompt too large: {len(content)} chars (max ~5000)"
|
||||
|
||||
if len(content) < 1000:
|
||||
return False, f"Prompt too small: {len(content)} chars (min ~1000)"
|
||||
|
||||
def check_prompt_structure():
    p = Path("templates/harvest-prompt.md")
    if not p.exists(): return False, "harvest-prompt.md not found"
    c = p.read_text()
    for s in ["System Prompt","Instructions","Categories","Output Format","Confidence Scoring","Constraints","Example"]:
        if s.lower() not in c.lower(): return False, f"Missing section: {s}"
    for cat in ["fact","pitfall","pattern","tool-quirk","question"]:
        if cat not in c: return False, f"Missing category: {cat}"
    if len(c) > 5000: return False, f"Too large: {len(c)}"
    if len(c) < 1000: return False, f"Too small: {len(c)}"
    return True, "Prompt structure is valid"

def check_confidence_scoring():
    c = Path("templates/harvest-prompt.md").read_text()
    for l in ["0.9-1.0","0.7-0.8","0.5-0.6","0.3-0.4","0.1-0.2"]:
        if l not in c: return False, f"Missing level: {l}"
    return True, "Confidence scoring defined"

def check_example_quality():
    c = Path("templates/harvest-prompt.md").read_text()
    if "example" not in c.lower(): return False, "No examples"
    m = re.search(r'"knowledge"', c[c.lower().find("example"):])
    if not m: return False, "No JSON example"
    return True, "Examples present"

def check_constraint_coverage():
    c = Path("templates/harvest-prompt.md").read_text()
    for x in ["no hallucination","explicitly","partial","failed sessions"]:
        if x not in c.lower(): return False, f"Missing: {x}"
    return True, "Constraints covered"

def check_test_sessions():
    d = Path("test_sessions")
    if not d.exists(): return False, "test_sessions/ not found"
    files = list(d.glob("*.jsonl"))
    if len(files) < 5: return False, f"Only {len(files)} sessions"
    for f in files:
        for i, line in enumerate(f.read_text().strip().split("\n"), 1):
            try: json.loads(line)
            except json.JSONDecodeError as e: return False, f"{f.name}:{i}: {e}"
    return True, f"{len(files)} valid sessions"

def test_prompt_structure():
    passed, msg = check_prompt_structure()
    assert passed, msg

def test_confidence_scoring():
    """Test that confidence scoring is properly defined."""
    prompt_path = Path("templates/harvest-prompt.md")
    content = prompt_path.read_text()

    # Check for confidence scale definitions
    confidence_levels = [
        ("0.9-1.0", "explicitly stated"),
        ("0.7-0.8", "clearly implied"),
        ("0.5-0.6", "suggested"),
        ("0.3-0.4", "inferred"),
        ("0.1-0.2", "speculative")
    ]

    for level, description in confidence_levels:
        if level not in content:
            return False, f"Missing confidence level: {level}"
        if description.lower() not in content.lower():
            return False, f"Missing confidence description: {description}"

    return True, "Confidence scoring is properly defined"
    passed, msg = check_confidence_scoring()
    assert passed, msg

def test_example_quality():
    """Test that examples are clear and complete."""
    prompt_path = Path("templates/harvest-prompt.md")
    content = prompt_path.read_text()

    # Check for example input/output
    if "example" not in content.lower():
        return False, "No examples provided"

    # Check that example includes all categories
    example_section = content[content.lower().find("example"):]

    # Look for JSON example
    json_match = re.search(r'\{[\s\S]*"knowledge"[\s\S]*\}', example_section)
    if not json_match:
        return False, "No JSON example found"

    example_json = json_match.group(0)

    # Check for all categories in example
    for category in ["fact", "pitfall", "pattern", "tool-quirk", "question"]:
        if category not in example_json:
            return False, f"Example missing category: {category}"

    return True, "Examples are clear and complete"
    passed, msg = check_example_quality()
    assert passed, msg

def test_constraint_coverage():
    """Test that constraints cover all requirements."""
    prompt_path = Path("templates/harvest-prompt.md")
    content = prompt_path.read_text()

    required_constraints = [
        "No hallucination",
        "only extract",
        "explicitly",
        "partial",
        "failed sessions",
        "1k tokens"
    ]

    for constraint in required_constraints:
        if constraint.lower() not in content.lower():
            return False, f"Missing constraint: {constraint}"

    return True, "Constraints cover all requirements"
    passed, msg = check_constraint_coverage()
    assert passed, msg

def test_test_sessions():
    """Test that test sessions exist and are valid."""
    test_sessions_dir = Path("test_sessions")
    if not test_sessions_dir.exists():
        return False, "test_sessions directory not found"

    session_files = list(test_sessions_dir.glob("*.jsonl"))
    if len(session_files) < 5:
        return False, f"Only {len(session_files)} test sessions found, need 5"

    # Check each session file
    for session_file in session_files:
        content = session_file.read_text()
        lines = content.strip().split("\n")

        # Check that each line is valid JSON
        for i, line in enumerate(lines, 1):
            try:
                json.loads(line)
            except json.JSONDecodeError as e:
                return False, f"Invalid JSON in {session_file.name}, line {i}: {e}"

    return True, f"Found {len(session_files)} valid test sessions"

def run_all_tests():
    """Run all tests and return results."""
    tests = [
        ("Prompt Structure", test_prompt_structure),
        ("Confidence Scoring", test_confidence_scoring),
        ("Example Quality", test_example_quality),
        ("Constraint Coverage", test_constraint_coverage),
        ("Test Sessions", test_test_sessions)
    ]

    results = []
    all_passed = True

    for test_name, test_func in tests:
        try:
            passed, message = test_func()
            results.append({
                "test": test_name,
                "passed": passed,
                "message": message
            })
            if not passed:
                all_passed = False
        except Exception as e:
            results.append({
                "test": test_name,
                "passed": False,
                "message": f"Error: {str(e)}"
            })
            all_passed = False

    # Print results
    print("=" * 60)
    print("HARVEST PROMPT TEST RESULTS")
    print("=" * 60)

    for result in results:
        status = "✓ PASS" if result["passed"] else "✗ FAIL"
        print(f"{status}: {result['test']}")
        print(f"  {result['message']}")
        print()

    print("=" * 60)
    if all_passed:
        print("ALL TESTS PASSED!")
    else:
        print("SOME TESTS FAILED!")
    print("=" * 60)

    return all_passed, results
    passed, msg = check_test_sessions()
    assert passed, msg

if __name__ == "__main__":
    all_passed, results = run_all_tests()

    # Save results to file
    with open("test_results.json", "w") as f:
        json.dump({
            "all_passed": all_passed,
            "results": results,
            "timestamp": "2026-04-14T19:05:00Z"
        }, f, indent=2)

    print(f"Results saved to test_results.json")

    # Exit with appropriate code
    exit(0 if all_passed else 1)
checks = [check_prompt_structure, check_confidence_scoring, check_example_quality, check_constraint_coverage, check_test_sessions]
for fn in checks:
    ok, msg = fn()
    print(f"{'PASS' if ok else 'FAIL'}: {fn.__name__} -- {msg}")
207
tests/test_dedup.py
Normal file
@@ -0,0 +1,207 @@
"""Tests for knowledge deduplication module (Issue #196)."""
|
||||
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
|
||||
|
||||
from dedup import (
|
||||
normalize_text,
|
||||
content_hash,
|
||||
tokenize,
|
||||
token_similarity,
|
||||
quality_score,
|
||||
merge_facts,
|
||||
dedup_facts,
|
||||
generate_test_duplicates,
|
||||
)
|
||||
|
||||
|
||||
class TestNormalize:
|
||||
def test_lowercases(self):
|
||||
assert normalize_text("Hello World") == "hello world"
|
||||
|
||||
def test_collapses_whitespace(self):
|
||||
assert normalize_text(" hello world ") == "hello world"
|
||||
|
||||
def test_strips(self):
|
||||
assert normalize_text(" text ") == "text"


class TestContentHash:
    def test_deterministic(self):
        h1 = content_hash("Hello World")
        h2 = content_hash("hello world")
        h3 = content_hash(" Hello World ")
        assert h1 == h2 == h3

    def test_different_texts(self):
        h1 = content_hash("Hello")
        h2 = content_hash("World")
        assert h1 != h2

    def test_returns_hex(self):
        h = content_hash("test")
        assert len(h) == 64  # SHA256
        assert all(c in '0123456789abcdef' for c in h)
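
# A content_hash sketch consistent with TestContentHash: hashing the
# normalized text makes case and whitespace variants collide, and
# sha256's hex digest gives the 64 hex chars asserted above (assumed;
# dedup.py may hash differently):
import hashlib

def content_hash_sketch(text: str) -> str:
    return hashlib.sha256(normalize_text(text).encode("utf-8")).hexdigest()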


class TestTokenize:
    def test_extracts_words(self):
        tokens = tokenize("Hello World Test")
        assert "hello" in tokens
        assert "world" in tokens
        assert "test" in tokens

    def test_skips_short_words(self):
        tokens = tokenize("a to is the hello")
        assert "a" not in tokens
        assert "to" not in tokens
        assert "hello" in tokens

    def test_returns_set(self):
        tokens = tokenize("hello hello world")
        assert isinstance(tokens, set)
        assert len(tokens) == 2
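
# A plausible tokenize consistent with TestTokenize: lowercase words of
# three or more characters, returned as a set (the length cutoff is an
# assumption inferred from test_skips_short_words):
import re

def tokenize_sketch(text: str) -> set:
    return {w for w in re.findall(r"[a-z0-9]+", text.lower()) if len(w) >= 3}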


class TestTokenSimilarity:
    def test_identical(self):
        assert token_similarity("hello world", "hello world") == 1.0

    def test_no_overlap(self):
        assert token_similarity("alpha beta", "gamma delta") == 0.0

    def test_partial_overlap(self):
        sim = token_similarity("hello world test", "hello universe test")
        assert 0.3 < sim < 0.7

    def test_empty(self):
        assert token_similarity("", "hello") == 0.0
        assert token_similarity("hello", "") == 0.0

    def test_symmetric(self):
        a = "hello world test"
        b = "hello universe test"
        assert token_similarity(a, b) == token_similarity(b, a)
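
# The identity, empty-input, and symmetry cases above all fit a Jaccard
# index over token sets; a sketch under that assumption:
def token_similarity_sketch(a: str, b: str) -> float:
    ta, tb = tokenize(a), tokenize(b)
    if not ta or not tb:
        return 0.0
    return len(ta & tb) / len(ta | tb)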


class TestQualityScore:
    def test_high_confidence(self):
        fact = {"confidence": 0.95, "source_count": 5, "tags": ["test"], "related": ["x"]}
        score = quality_score(fact)
        assert score > 0.7

    def test_low_confidence(self):
        fact = {"confidence": 0.3, "source_count": 1}
        score = quality_score(fact)
        assert score < 0.5

    def test_defaults(self):
        score = quality_score({})
        assert 0 < score < 1


class TestMergeFacts:
    def test_merges_tags(self):
        keep = {"id": "a", "fact": "test", "tags": ["git"], "confidence": 0.9}
        drop = {"id": "b", "fact": "test", "tags": ["python"], "confidence": 0.8}
        merged = merge_facts(keep, drop)
        assert "git" in merged["tags"]
        assert "python" in merged["tags"]

    def test_merges_source_count(self):
        keep = {"id": "a", "fact": "test", "source_count": 3}
        drop = {"id": "b", "fact": "test", "source_count": 2}
        merged = merge_facts(keep, drop)
        assert merged["source_count"] == 5

    def test_keeps_higher_confidence(self):
        keep = {"id": "a", "fact": "test", "confidence": 0.7}
        drop = {"id": "b", "fact": "test", "confidence": 0.9}
        merged = merge_facts(keep, drop)
        assert merged["confidence"] == 0.9

    def test_tracks_merged_from(self):
        keep = {"id": "a", "fact": "test"}
        drop = {"id": "b", "fact": "test"}
        merged = merge_facts(keep, drop)
        assert "b" in merged["_merged_from"]


class TestDedupFacts:
    def test_removes_exact_dupes(self):
        facts = [
            {"id": "1", "fact": "Always use git rebase"},
            {"id": "2", "fact": "Always use git rebase"},  # exact dupe
            {"id": "3", "fact": "Check logs first"},
        ]
        deduped, stats = dedup_facts(facts)
        assert stats["exact_dupes"] == 1
        assert stats["unique"] == 2

    def test_removes_near_dupes(self):
        facts = [
            {"id": "1", "fact": "Always check logs before deploying to production server"},
            {"id": "2", "fact": "Always check logs before deploying to production environment"},
            {"id": "3", "fact": "Use docker compose for local development environments"},
        ]
        deduped, stats = dedup_facts(facts, near_threshold=0.5)
        assert stats["near_dupes"] >= 1
        assert stats["unique"] == 2

    def test_preserves_unique(self):
        facts = [
            {"id": "1", "fact": "Use git rebase for clean history"},
            {"id": "2", "fact": "Docker containers should be stateless"},
            {"id": "3", "fact": "Always write tests before code"},
        ]
        deduped, stats = dedup_facts(facts)
        assert stats["unique"] == 3
        assert stats["removed"] == 0

    def test_empty_input(self):
        deduped, stats = dedup_facts([])
        assert stats["total"] == 0
        assert stats["unique"] == 0

    def test_keeps_higher_quality_near_dup(self):
        facts = [
            {"id": "1", "fact": "Check logs before deploying to production server", "confidence": 0.5, "source_count": 1},
            {"id": "2", "fact": "Check logs before deploying to production environment", "confidence": 0.9, "source_count": 5, "tags": ["ops"]},
        ]
        deduped, stats = dedup_facts(facts, near_threshold=0.5)
        assert stats["unique"] == 1
        # Higher quality fact should be kept
        assert deduped[0]["confidence"] == 0.9

    def test_dry_run_does_not_modify(self):
        facts = [
            {"id": "1", "fact": "Same text"},
            {"id": "2", "fact": "Same text"},
        ]
        deduped, stats = dedup_facts(facts, dry_run=True)
        assert stats["exact_dupes"] == 1
        # In dry_run, merge_facts is skipped so facts aren't modified
        assert len(deduped) == 1
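
# A dedup_facts sketch consistent with the stats keys and behaviors
# asserted above: exact duplicates matched by content_hash, near
# duplicates by token_similarity against survivors, the higher
# quality_score entry kept, and merging skipped in dry_run (the real
# dedup.py pipeline may differ in detail):
def dedup_facts_sketch(facts, near_threshold=0.8, dry_run=False):
    stats = {"total": len(facts), "exact_dupes": 0, "near_dupes": 0,
             "unique": 0, "removed": 0}
    kept = []      # surviving facts, in first-seen order
    by_hash = {}   # content hash -> index into kept
    for fact in facts:
        h = content_hash(fact["fact"])
        if h in by_hash:
            stats["exact_dupes"] += 1
            if not dry_run:
                i = by_hash[h]
                kept[i] = merge_facts(kept[i], fact)
            continue
        near = next((i for i, k in enumerate(kept)
                     if token_similarity(k["fact"], fact["fact"]) >= near_threshold),
                    None)
        if near is not None:
            stats["near_dupes"] += 1
            winner, loser = kept[near], fact
            if quality_score(loser) > quality_score(winner):
                winner, loser = loser, winner
            kept[near] = winner if dry_run else merge_facts(winner, loser)
            continue
        by_hash[h] = len(kept)
        kept.append(fact)
    stats["unique"] = len(kept)
    stats["removed"] = stats["total"] - stats["unique"]
    return kept, stats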


class TestGenerateTestDuplicates:
    def test_generates_correct_count(self):
        facts = generate_test_duplicates(20)
        assert len(facts) > 20  # 20 unique + duplicates

    def test_has_exact_dupes(self):
        facts = generate_test_duplicates(20)
        hashes = [content_hash(f["fact"]) for f in facts]
        # Should have some duplicate hashes
        assert len(hashes) != len(set(hashes))

    def test_dedup_removes_dupes(self):
        facts = generate_test_duplicates(20)
        deduped, stats = dedup_facts(facts)
        assert stats["unique"] <= 20
        assert stats["removed"] > 0
227
tests/test_freshness.py
Normal file
@@ -0,0 +1,227 @@
#!/usr/bin/env python3
"""Tests for scripts/freshness.py — 8 tests."""

import json
import os
import sys
import tempfile

sys.path.insert(0, os.path.join(os.path.dirname(__file__) or ".", ".."))
import importlib.util
spec = importlib.util.spec_from_file_location(
    "freshness", os.path.join(os.path.dirname(__file__) or ".", "..", "scripts", "freshness.py"))
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)

compute_file_hash = mod.compute_file_hash
check_freshness = mod.check_freshness
load_knowledge_entries = mod.load_knowledge_entries


def test_compute_file_hash():
    """File hash should be computed correctly."""
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
        f.write("test content")
        f.flush()
    h = compute_file_hash(f.name)
    assert h is not None
    assert h.startswith("sha256:")
    os.unlink(f.name)
    print("PASS: test_compute_file_hash")


def test_compute_file_hash_nonexistent():
    """Nonexistent file should return None."""
    h = compute_file_hash("/nonexistent/file.txt")
    assert h is None
    print("PASS: test_compute_file_hash_nonexistent")


def test_load_knowledge_entries_empty():
    """Empty knowledge dir should return empty list."""
    with tempfile.TemporaryDirectory() as tmpdir:
        entries = load_knowledge_entries(tmpdir)
        assert entries == []
    print("PASS: test_load_knowledge_entries_empty")


def test_load_knowledge_entries_from_index():
    """Should load entries from index.json."""
    with tempfile.TemporaryDirectory() as tmpdir:
        # Create index.json
        index_path = os.path.join(tmpdir, "index.json")
        with open(index_path, "w") as f:
            json.dump({
                "facts": [
                    {
                        "fact": "Test fact",
                        "source_file": "test.py",
                        "source_hash": "sha256:abc123",
                        "category": "fact",
                        "confidence": 0.9
                    }
                ]
            }, f)

        entries = load_knowledge_entries(tmpdir)
        assert len(entries) == 1
        assert entries[0]["fact"] == "Test fact"
        assert entries[0]["source_file"] == "test.py"
    print("PASS: test_load_knowledge_entries_from_index")


def test_load_knowledge_entries_from_yaml():
    """Should load entries from YAML files."""
    with tempfile.TemporaryDirectory() as tmpdir:
        # Create global directory
        global_dir = os.path.join(tmpdir, "global")
        os.makedirs(global_dir)

        # Create YAML file
        yaml_path = os.path.join(global_dir, "test.yaml")
        with open(yaml_path, "w") as f:
            f.write("""
pitfalls:
  - description: "Test pitfall"
    source_file: "test.py"
    source_hash: "sha256:def456"
    category: "pitfall"
    confidence: 0.8
""")

        entries = load_knowledge_entries(tmpdir)
        assert len(entries) == 1
        assert entries[0]["fact"] == "Test pitfall"
        assert entries[0]["category"] == "pitfall"
    print("PASS: test_load_knowledge_entries_from_yaml")


def test_check_freshness_no_changes():
    """With no source file reference, entries should be counted correctly."""
    with tempfile.TemporaryDirectory() as tmpdir:
        # Create knowledge dir
        knowledge_dir = os.path.join(tmpdir, "knowledge")
        os.makedirs(knowledge_dir)

        # Create repo dir
        repo_dir = os.path.join(tmpdir, "repo")
        os.makedirs(repo_dir)

        # Create index.json with entry that has no source_file
        index_path = os.path.join(knowledge_dir, "index.json")
        with open(index_path, "w") as f:
            json.dump({
                "facts": [
                    {
                        "fact": "General knowledge",
                        "category": "fact",
                        "confidence": 0.9
                        # No source_file or source_hash
                    }
                ]
            }, f)

        result = check_freshness(knowledge_dir, repo_dir, days=1)

        # Entry without source_file should be counted as "fresh" (no_source status)
        assert result["summary"]["total"] == 1
        assert result["summary"]["stale"] == 0
        assert result["summary"]["fresh"] == 1
        assert result["fresh_entries"][0]["status"] == "no_source"
    print("PASS: test_check_freshness_no_changes")


def test_check_freshness_with_hash_mismatch():
    """Hash mismatch should mark entry as stale."""
    with tempfile.TemporaryDirectory() as tmpdir:
        # Create knowledge dir
        knowledge_dir = os.path.join(tmpdir, "knowledge")
        os.makedirs(knowledge_dir)

        # Create repo dir with a file
        repo_dir = os.path.join(tmpdir, "repo")
        os.makedirs(repo_dir)

        test_file = os.path.join(repo_dir, "test.py")
        with open(test_file, "w") as f:
            f.write("print('hello')")

        # Create index.json with wrong hash
        index_path = os.path.join(knowledge_dir, "index.json")
        with open(index_path, "w") as f:
            json.dump({
                "facts": [
                    {
                        "fact": "Test fact",
                        "source_file": "test.py",
                        "source_hash": "sha256:wronghash",
                        "category": "fact",
                        "confidence": 0.9
                    }
                ]
            }, f)

        # Initialize git repo
        os.system(f"cd {repo_dir} && git init && git add . && git commit -m 'init' 2>/dev/null")

        result = check_freshness(knowledge_dir, repo_dir, days=1)

        assert result["summary"]["total"] == 1
        assert result["summary"]["stale"] == 1
        assert result["summary"]["fresh"] == 0
        assert result["stale_entries"][0]["reason"] == "hash_mismatch"
    print("PASS: test_check_freshness_with_hash_mismatch")


def test_check_freshness_missing_source():
    """Missing source file should mark entry as stale."""
    with tempfile.TemporaryDirectory() as tmpdir:
        # Create knowledge dir
        knowledge_dir = os.path.join(tmpdir, "knowledge")
        os.makedirs(knowledge_dir)

        # Create repo dir (without the referenced file)
        repo_dir = os.path.join(tmpdir, "repo")
        os.makedirs(repo_dir)

        # Create index.json referencing nonexistent file
        index_path = os.path.join(knowledge_dir, "index.json")
        with open(index_path, "w") as f:
            json.dump({
                "facts": [
                    {
                        "fact": "Test fact",
                        "source_file": "nonexistent.py",
                        "source_hash": "sha256:abc123",
                        "category": "fact",
                        "confidence": 0.9
                    }
                ]
            }, f)

        # Initialize git repo
        os.system(f"cd {repo_dir} && git init && git add . && git commit -m 'init' 2>/dev/null")

        result = check_freshness(knowledge_dir, repo_dir, days=1)

        assert result["summary"]["total"] == 1
        assert result["summary"]["stale"] == 1
        assert result["summary"]["fresh"] == 0
        assert result["stale_entries"][0]["reason"] == "source_missing"
    print("PASS: test_check_freshness_missing_source")


def run_all():
    test_compute_file_hash()
    test_compute_file_hash_nonexistent()
    test_load_knowledge_entries_empty()
    test_load_knowledge_entries_from_index()
    test_load_knowledge_entries_from_yaml()
    test_check_freshness_no_changes()
    test_check_freshness_with_hash_mismatch()
    test_check_freshness_missing_source()
    print("\nAll 8 tests passed!")


if __name__ == "__main__":
    run_all()
108
tests/test_quality_gate.py
Normal file
@@ -0,0 +1,108 @@
"""
|
||||
Tests for quality_gate.py — Knowledge entry quality scoring.
|
||||
"""
|
||||
|
||||
import unittest
|
||||
from datetime import datetime, timezone, timedelta
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from quality_gate import (
|
||||
score_specificity,
|
||||
score_actionability,
|
||||
score_freshness,
|
||||
score_source_quality,
|
||||
score_entry,
|
||||
filter_entries,
|
||||
)
|
||||
|
||||
|
||||
class TestScoreSpecificity(unittest.TestCase):
|
||||
def test_specific_content_scores_high(self):
|
||||
content = "Run `python3 deploy.py --env prod` on 2026-04-15. Example: step 1 configure nginx."
|
||||
score = score_specificity(content)
|
||||
self.assertGreater(score, 0.6)
|
||||
|
||||
def test_vague_content_scores_low(self):
|
||||
content = "It generally depends. Various factors might affect this. Basically, it varies."
|
||||
score = score_specificity(content)
|
||||
self.assertLess(score, 0.5)
|
||||
|
||||
def test_empty_scores_baseline(self):
|
||||
score = score_specificity("")
|
||||
self.assertAlmostEqual(score, 0.5, delta=0.1)
|
||||
|
||||
|
||||
class TestScoreActionability(unittest.TestCase):
|
||||
def test_actionable_content_scores_high(self):
|
||||
content = "1. Run `pip install -r requirements.txt`\n2. Execute `python3 train.py`\n3. Verify with `pytest`"
|
||||
score = score_actionability(content)
|
||||
self.assertGreater(score, 0.6)
|
||||
|
||||
def test_abstract_content_scores_low(self):
|
||||
content = "The concept of intelligence is fascinating and multifaceted."
|
||||
score = score_actionability(content)
|
||||
self.assertLess(score, 0.5)
|
||||
|
||||
|
||||
class TestScoreFreshness(unittest.TestCase):
|
||||
def test_recent_timestamp_scores_high(self):
|
||||
recent = datetime.now(timezone.utc).isoformat()
|
||||
score = score_freshness(recent)
|
||||
self.assertGreater(score, 0.9)
|
||||
|
||||
def test_old_timestamp_scores_low(self):
|
||||
old = (datetime.now(timezone.utc) - timedelta(days=365)).isoformat()
|
||||
score = score_freshness(old)
|
||||
self.assertLess(score, 0.2)
|
||||
|
||||
def test_none_returns_baseline(self):
|
||||
score = score_freshness(None)
|
||||
self.assertEqual(score, 0.5)
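
# One decay curve consistent with these three tests: exponential decay
# with an assumed ~60-day half-life (quality_gate.py may use different
# constants) and 0.5 as the no-timestamp baseline:
import math

def score_freshness_sketch(timestamp, half_life_days=60.0):
    if timestamp is None:
        return 0.5
    age = datetime.now(timezone.utc) - datetime.fromisoformat(timestamp)
    return math.exp(-math.log(2) * age.days / half_life_days)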


class TestScoreSourceQuality(unittest.TestCase):
    def test_claude_scores_high(self):
        self.assertGreater(score_source_quality("claude-sonnet"), 0.85)

    def test_ollama_scores_lower(self):
        self.assertLess(score_source_quality("ollama"), 0.7)

    def test_unknown_returns_default(self):
        self.assertEqual(score_source_quality("unknown"), 0.5)


class TestScoreEntry(unittest.TestCase):
    def test_good_entry_scores_high(self):
        entry = {
            "content": "To deploy: run `kubectl apply -f deployment.yaml`. Verify with `kubectl get pods`.",
            "model": "claude-sonnet",
            "timestamp": datetime.now(timezone.utc).isoformat(),
        }
        score = score_entry(entry)
        self.assertGreater(score, 0.6)

    def test_poor_entry_scores_low(self):
        entry = {
            "content": "It depends. Various things might happen.",
            "model": "unknown",
        }
        score = score_entry(entry)
        self.assertLess(score, 0.5)
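
# An equal-weight combination that satisfies both tests above (the
# actual weighting inside quality_gate.score_entry is an assumption):
def score_entry_sketch(entry):
    content = entry.get("content", "")
    parts = [
        score_specificity(content),
        score_actionability(content),
        score_freshness(entry.get("timestamp")),
        score_source_quality(entry.get("model", "unknown")),
    ]
    return sum(parts) / len(parts)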


class TestFilterEntries(unittest.TestCase):
    def test_filters_low_quality(self):
        entries = [
            {"content": "Run `deploy.py` to fix the issue.", "model": "claude"},
            {"content": "It might work sometimes.", "model": "unknown"},
            {"content": "Configure nginx: step 1 edit nginx.conf", "model": "gpt-4"},
        ]
        filtered = filter_entries(entries, threshold=0.5)
        self.assertGreaterEqual(len(filtered), 2)


if __name__ == "__main__":
    unittest.main()
234
tests/test_review_comment_generator.py
Normal file
@@ -0,0 +1,234 @@
#!/usr/bin/env python3
"""
Smoke tests for Review Comment Generator — Issue #126
"""

from __future__ import annotations

import json
import subprocess
import sys
import hashlib
from io import StringIO
from pathlib import Path

import pytest

REPO_ROOT = Path(__file__).resolve().parents[1]
SCRIPTS_DIR = REPO_ROOT / "scripts"
GENERATOR = SCRIPTS_DIR / "review_comment_generator.py"
SAMPLE_FINDINGS = SCRIPTS_DIR / "sample_findings.jsonl"


class TestGeneratorPresence:
    def test_script_exists(self):
        assert GENERATOR.exists(), f"Missing: {GENERATOR}"

    def test_shebang_is_python(self):
        with open(GENERATOR) as f:
            first = f.readline().strip()
        assert first.startswith("#!"), "No shebang"
        assert "python" in first.lower()


class TestDeduplication:
    def test_content_hash_deterministic(self):
        from hashlib import sha256
        def ch(f):
            key = f"{f['file']}:{f['line']}:{f['text']}"
            return sha256(key.encode()).hexdigest()
        finding = {"file": "a.py", "line": 1, "text": "test"}
        assert ch(finding) == ch(finding)

    def test_duplicate_findings_are_removed(self):
        findings = [
            {"file": "a.py", "line": 1, "text": "foo", "severity": "info"},
            {"file": "a.py", "line": 1, "text": "foo", "severity": "warning"},
            {"file": "b.py", "line": 2, "text": "bar", "severity": "info"},
        ]
        seen = {}
        for f in findings:
            key = f"{f['file']}:{f['line']}:{f['text']}"
            seen[key] = f
        assert len(seen) == 2

    def test_different_findings_are_kept(self):
        findings = [
            {"file": "a.py", "line": 1, "text": "foo"},
            {"file": "a.py", "line": 2, "text": "foo"},
            {"file": "a.py", "line": 1, "text": "bar"},
        ]
        seen = {}
        for f in findings:
            key = f"{f['file']}:{f['line']}:{f['text']}"
            seen[key] = f
        assert len(seen) == 3


class TestCommentFormatting:
    def test_format_basic(self):
        sys.path.insert(0, str(SCRIPTS_DIR))
        from review_comment_generator import format_comment
        f = {"file": "scripts/foo.py", "line": 10, "text": "Fix this bug", "severity": "warning"}
        body = format_comment(f)
        assert "📝 **Review Comment**" not in body  # warning uses ⚠️
        assert "⚠️ **Review Comment**" in body
        assert "`scripts/foo.py`" in body
        assert "Line: 10" in body
        assert "> Fix this bug" in body

    def test_format_severity_emoji(self):
        sys.path.insert(0, str(SCRIPTS_DIR))
        from review_comment_generator import format_comment
        cases = [("error", "🛑"), ("warning", "⚠️"), ("info", "ℹ️"), ("unknown", "📝")]
        for severity, emoji in cases:
            f = {"file": "x.py", "line": 1, "text": "test", "severity": severity}
            assert emoji in format_comment(f)
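
# A format_comment sketch that satisfies every assertion in this class;
# the layout is inferred from the asserts, and the real template in
# review_comment_generator.py may add more fields:
SEVERITY_EMOJI = {"error": "🛑", "warning": "⚠️", "info": "ℹ️"}

def format_comment_sketch(finding):
    emoji = SEVERITY_EMOJI.get(finding.get("severity", ""), "📝")
    return (
        f"{emoji} **Review Comment**\n\n"
        f"File: `{finding['file']}`\n"
        f"Line: {finding['line']}\n\n"
        f"> {finding['text']}"
    )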


class TestFindingsLoader:
    def test_load_from_file(self):
        sys.path.insert(0, str(SCRIPTS_DIR))
        from review_comment_generator import load_findings
        findings = load_findings(SAMPLE_FINDINGS, from_stdin=False)
        assert len(findings) >= 4

    def test_load_ignores_blank_and_comments(self):
        import tempfile, os
        with tempfile.NamedTemporaryFile(mode="w", suffix=".jsonl", delete=False) as tf:
            tf.write('{"file":"a.py","line":1,"text":"valid"}\n')
            tf.write('\n')
            tf.write('# this is a comment\n')
            tf.write('{"file":"b.py","line":2,"text":"also valid"}\n')
            tfname = tf.name
        try:
            sys.path.insert(0, str(SCRIPTS_DIR))
            from review_comment_generator import load_findings
            assert len(load_findings(Path(tfname), from_stdin=False)) == 2
        finally:
            os.unlink(tfname)

    def test_invalid_json_line_skipped(self, capsys):
        import tempfile, os
        with tempfile.NamedTemporaryFile(mode="w", suffix=".jsonl", delete=False) as tf:
            tf.write('invalid json\n')
            tf.write('{"file":"ok.py","line":1,"text":"valid"}\n')
            tfname = tf.name
        try:
            sys.path.insert(0, str(SCRIPTS_DIR))
            from review_comment_generator import load_findings
            assert len(load_findings(Path(tfname), from_stdin=False)) == 1
        finally:
            os.unlink(tfname)


class TestDryRunMode:
    def test_dry_run_counts_unique(self):
        result = subprocess.run(
            [sys.executable, str(GENERATOR), "--pr", "126",
             "--input", str(SAMPLE_FINDINGS), "--dry-run"],
            capture_output=True, text=True, cwd=REPO_ROOT, timeout=15
        )
        assert result.returncode == 0
        assert "DRY RUN" in result.stdout
        assert "Review Comment" in result.stdout

    def test_dry_run_shows_all_unique(self):
        result = subprocess.run(
            [sys.executable, str(GENERATOR), "--pr", "126",
             "--input", str(SAMPLE_FINDINGS), "--dry-run"],
            capture_output=True, text=True, cwd=REPO_ROOT, timeout=15
        )
        assert result.stdout.count("--- Comment") == 4


class TestJSONOutputMode:
    def test_json_flag_emits_valid_json(self):
        result = subprocess.run(
            [sys.executable, str(GENERATOR), "--pr", "126",
             "--input", str(SAMPLE_FINDINGS), "--json"],
            capture_output=True, text=True, cwd=REPO_ROOT, timeout=15
        )
        assert result.returncode == 0
        payload = json.loads(result.stdout)
        assert "total" in payload and "unique" in payload and "findings" in payload
        assert payload["total"] >= payload["unique"]

    def test_json_findings_have_required_fields(self):
        result = subprocess.run(
            [sys.executable, str(GENERATOR), "--pr", "126",
             "--input", str(SAMPLE_FINDINGS), "--json"],
            capture_output=True, text=True, cwd=REPO_ROOT, timeout=15
        )
        payload = json.loads(result.stdout)
        for f in payload["findings"]:
            assert "file" in f and "line" in f and "text" in f


class TestGiteaClient:
    def test_post_issue_comment_builds_correct_url(self):
        sys.path.insert(0, str(SCRIPTS_DIR))
        from review_comment_generator import GiteaClient
        client = GiteaClient("https://example.com", "token123", "MyOrg", "myrepo")
        assert client.org == "MyOrg" and client.repo == "myrepo"

    def test_generate_comment_body_has_required_fields(self):
        sys.path.insert(0, str(SCRIPTS_DIR))
        from review_comment_generator import format_comment
        f = {"file": "x.py", "line": 5, "text": "Fix this", "severity": "error"}
        body = format_comment(f)
        assert "x.py" in body and "5" in body and "Fix this" in body


class TestFullPipeline:
    def test_end_to_end_json_output(self):
        result = subprocess.run(
            [sys.executable, str(GENERATOR), "--pr", "126",
             "--input", str(SAMPLE_FINDINGS), "--json"],
            capture_output=True, text=True, cwd=REPO_ROOT, timeout=15
        )
        assert result.returncode == 0
        data = json.loads(result.stdout)
        assert data["total"] == 5
        assert data["unique"] == 4
        f = data["findings"][0]
        for key in ("file", "line", "text", "severity"):
            assert key in f

    def test_token_loading_fallback(self):
        sys.path.insert(0, str(SCRIPTS_DIR))
        from review_comment_generator import load_token
        token = load_token()
        assert token is None or isinstance(token, str)


class TestErrorHandling:
    def test_missing_input_shows_error(self):
        result = subprocess.run(
            [sys.executable, str(GENERATOR), "--pr", "126"],
            capture_output=True, text=True, cwd=REPO_ROOT, timeout=15
        )
        assert result.returncode != 0
        assert "--input" in result.stderr or "--stdin" in result.stderr

    def test_invalid_json_line_skipped(self):
        import tempfile, os
        with tempfile.NamedTemporaryFile(mode="w", suffix=".jsonl", delete=False) as tf:
            tf.write('invalid json\n')
            tf.write('{"file":"ok.py","line":1,"text":"valid"}\n')
            tfname = tf.name
        try:
            result = subprocess.run(
                [sys.executable, str(GENERATOR), "--pr", "126",
                 "--input", tfname, "--json"],
                capture_output=True, text=True, cwd=REPO_ROOT, timeout=15
            )
            data = json.loads(result.stdout)
            assert data["total"] == 1
            assert data["unique"] == 1
        finally:
            os.unlink(tfname)


if __name__ == "__main__":
    pytest.main([__file__, "-v"])