306 lines
9.8 KiB
Python
306 lines
9.8 KiB
Python
|
|
#!/usr/bin/env python3
|
||
|
|
"""
|
||
|
|
Tests for Priority Rebalancer
|
||
|
|
"""
|
||
|
|
|
||
|
|
import json
|
||
|
|
import os
|
||
|
|
import sys
|
||
|
|
import tempfile
|
||
|
|
from datetime import datetime, timedelta
|
||
|
|
from pathlib import Path
|
||
|
|
|
||
|
|
# Add script dir to path
|
||
|
|
sys.path.insert(0, str(Path(__file__).parent))
|
||
|
|
|
||
|
|
from priority_rebalancer import (
|
||
|
|
GiteaClient,
|
||
|
|
IssueScore,
|
||
|
|
PipelineSignal,
|
||
|
|
compute_issue_score,
|
||
|
|
collect_knowledge_signals,
|
||
|
|
collect_metrics_signals,
|
||
|
|
extract_priority,
|
||
|
|
generate_report,
|
||
|
|
generate_markdown_report,
|
||
|
|
PRIORITY_LEVELS,
|
||
|
|
)
|
||
|
|
|
||
|
|
# ============================================================
|
||
|
|
# Test Helpers
|
||
|
|
# ============================================================
|
||
|
|
|
||
|
|
PASS = 0
|
||
|
|
FAIL = 0
|
||
|
|
|
||
|
|
def test(name):
|
||
|
|
def decorator(fn):
|
||
|
|
global PASS, FAIL
|
||
|
|
try:
|
||
|
|
fn()
|
||
|
|
PASS += 1
|
||
|
|
print(f" [PASS] {name}")
|
||
|
|
except Exception as e:
|
||
|
|
FAIL += 1
|
||
|
|
print(f" [FAIL] {name}: {e}")
|
||
|
|
return decorator
|
||
|
|
|
||
|
|
def assert_eq(a, b, msg=""):
    """Raise AssertionError unless ``a == b``; msg prefixes the failure text."""
    if a == b:
        return
    raise AssertionError(f"{msg} expected {b!r}, got {a!r}")
|
|
||
|
|
def assert_true(v, msg=""):
    """Raise AssertionError unless ``v`` is truthy."""
    if v:
        return
    raise AssertionError(msg if msg else "Expected True")
|
||
|
|
def assert_false(v, msg=""):
    """Raise AssertionError unless ``v`` is falsy."""
    if not v:
        return
    raise AssertionError(msg if msg else "Expected False")
||
|
|
|
||
|
|
# ============================================================
|
||
|
|
# Priority Extraction Tests
|
||
|
|
# ============================================================
|
||
|
|
|
||
|
|
print("=== Priority Rebalancer Tests ===\n")
|
||
|
|
|
||
|
|
print("-- Priority Extraction --")
|
||
|
|
|
||
|
|
@test("extract P0 from label")
|
||
|
|
def _():
|
||
|
|
assert_eq(extract_priority(["P0", "bug"]), "P0")
|
||
|
|
|
||
|
|
@test("extract P1 from priority:high")
|
||
|
|
def _():
|
||
|
|
assert_eq(extract_priority(["priority:high"]), "P1")
|
||
|
|
|
||
|
|
@test("extract P2 from priority:medium")
|
||
|
|
def _():
|
||
|
|
assert_eq(extract_priority(["priority:medium"]), "P2")
|
||
|
|
|
||
|
|
@test("extract P3 from priority:low")
|
||
|
|
def _():
|
||
|
|
assert_eq(extract_priority(["priority:low"]), "P3")
|
||
|
|
|
||
|
|
@test("returns None for no priority")
|
||
|
|
def _():
|
||
|
|
assert_eq(extract_priority(["bug", "enhancement"]), None)
|
||
|
|
|
||
|
|
@test("case insensitive")
|
||
|
|
def _():
|
||
|
|
assert_eq(extract_priority(["p1"]), "P1")
|
||
|
|
assert_eq(extract_priority(["PRIORITY:CRITICAL"]), "P0")
|
||
|
|
|
||
|
|
|
||
|
|
# ============================================================
|
||
|
|
# Issue Scoring Tests
|
||
|
|
# ============================================================
|
||
|
|
|
||
|
|
print("\n-- Issue Scoring --")
|
||
|
|
|
||
|
|
def make_issue(**kwargs):
|
||
|
|
defaults = {
|
||
|
|
"number": 1,
|
||
|
|
"title": "Test issue",
|
||
|
|
"labels": [],
|
||
|
|
"created_at": (datetime.utcnow() - timedelta(days=5)).isoformat() + "Z",
|
||
|
|
"comments": 0,
|
||
|
|
"assignees": None,
|
||
|
|
}
|
||
|
|
defaults.update(kwargs)
|
||
|
|
return defaults
|
||
|
|
|
||
|
|
@test("bug gets score boost")
|
||
|
|
def _():
|
||
|
|
issue = make_issue(title="Incorrect output format", labels=["bug"])
|
||
|
|
score = compute_issue_score(issue, "test-repo", [], datetime.utcnow())
|
||
|
|
assert_true(score.score > 0, f"Bug should boost score, got {score.score}")
|
||
|
|
# Bug label alone should be P2 or P3 (not P0)
|
||
|
|
assert_true(score.suggested_priority in ("P2", "P3"),
|
||
|
|
f"Bug label alone should be P2/P3, got {score.suggested_priority}")
|
||
|
|
|
||
|
|
@test("security gets high score")
|
||
|
|
def _():
|
||
|
|
issue = make_issue(title="Security: auth bypass", labels=["bug"])
|
||
|
|
score = compute_issue_score(issue, "test-repo", [], datetime.utcnow())
|
||
|
|
assert_true(score.score >= 25, f"Security should score high, got {score.score}")
|
||
|
|
|
||
|
|
@test("old dormant issue gets penalized")
|
||
|
|
def _():
|
||
|
|
issue = make_issue(
|
||
|
|
title="Some old feature",
|
||
|
|
created_at=(datetime.utcnow() - timedelta(days=120)).isoformat() + "Z",
|
||
|
|
comments=0
|
||
|
|
)
|
||
|
|
score = compute_issue_score(issue, "test-repo", [], datetime.utcnow())
|
||
|
|
assert_true(score.score < 0, f"Old dormant should be negative, got {score.score}")
|
||
|
|
assert_true(any("Dormant" in r for r in score.reasons), "Should mention dormancy")
|
||
|
|
|
||
|
|
@test("active discussion boosts score")
|
||
|
|
def _():
|
||
|
|
issue = make_issue(title="Important fix", comments=8)
|
||
|
|
score = compute_issue_score(issue, "test-repo", [], datetime.utcnow())
|
||
|
|
assert_true(score.score > 5, f"Active discussion should boost, got {score.score}")
|
||
|
|
assert_true(any("Active" in r for r in score.reasons))
|
||
|
|
|
||
|
|
@test("unassigned gets slight boost")
|
||
|
|
def _():
|
||
|
|
issue = make_issue(title="Fix bug", assignees=None)
|
||
|
|
score = compute_issue_score(issue, "test-repo", [], datetime.utcnow())
|
||
|
|
assert_true(any("Unassigned" in r for r in score.reasons))
|
||
|
|
|
||
|
|
@test("assigned issue notes assignee")
|
||
|
|
def _():
|
||
|
|
issue = make_issue(title="Fix bug", assignees=[{"login": "alice"}])
|
||
|
|
score = compute_issue_score(issue, "test-repo", [], datetime.utcnow())
|
||
|
|
assert_eq(score.assignee, "alice")
|
||
|
|
|
||
|
|
@test("nice-to-have gets penalized")
|
||
|
|
def _():
|
||
|
|
issue = make_issue(title="Nice to have: fancy animation")
|
||
|
|
score = compute_issue_score(issue, "test-repo", [], datetime.utcnow())
|
||
|
|
assert_true(score.score < 0, f"Nice-to-have should be negative, got {score.score}")
|
||
|
|
|
||
|
|
|
||
|
|
# ============================================================
|
||
|
|
# Pipeline Signal Tests
|
||
|
|
# ============================================================
|
||
|
|
|
||
|
|
print("\n-- Pipeline Signals --")
|
||
|
|
|
||
|
|
@test("signal alignment boosts matching issues")
|
||
|
|
def _():
|
||
|
|
signals = [PipelineSignal(
|
||
|
|
source="knowledge",
|
||
|
|
signal_type="stale_knowledge",
|
||
|
|
weight=0.8,
|
||
|
|
detail="20 stale facts"
|
||
|
|
)]
|
||
|
|
issue = make_issue(title="Fix stale knowledge entries")
|
||
|
|
score = compute_issue_score(issue, "test-repo", signals, datetime.utcnow())
|
||
|
|
assert_true(any("Matches signal" in r for r in score.reasons))
|
||
|
|
|
||
|
|
@test("empty knowledge boosts harvester issues")
|
||
|
|
def _():
|
||
|
|
signals = [PipelineSignal(
|
||
|
|
source="knowledge",
|
||
|
|
signal_type="empty_knowledge",
|
||
|
|
weight=0.7,
|
||
|
|
detail="0 facts"
|
||
|
|
)]
|
||
|
|
issue = make_issue(title="Implement harvester pipeline")
|
||
|
|
score = compute_issue_score(issue, "test-repo", signals, datetime.utcnow())
|
||
|
|
assert_true(any("Critical gap" in r for r in score.reasons))
|
||
|
|
|
||
|
|
|
||
|
|
# ============================================================
|
||
|
|
# Knowledge Signal Collection Tests
|
||
|
|
# ============================================================
|
||
|
|
|
||
|
|
print("\n-- Knowledge Signal Collection --")
|
||
|
|
|
||
|
|
@test("missing index generates signal")
|
||
|
|
def _():
|
||
|
|
with tempfile.TemporaryDirectory() as tmpdir:
|
||
|
|
signals = collect_knowledge_signals(tmpdir)
|
||
|
|
assert_true(len(signals) > 0)
|
||
|
|
assert_eq(signals[0].signal_type, "missing_index")
|
||
|
|
|
||
|
|
@test("empty knowledge generates signal")
|
||
|
|
def _():
|
||
|
|
with tempfile.TemporaryDirectory() as tmpdir:
|
||
|
|
idx = os.path.join(tmpdir, "index.json")
|
||
|
|
with open(idx, "w") as f:
|
||
|
|
json.dump({"facts": []}, f)
|
||
|
|
signals = collect_knowledge_signals(tmpdir)
|
||
|
|
assert_true(any(s.signal_type == "empty_knowledge" for s in signals))
|
||
|
|
|
||
|
|
@test("corrupt index generates signal")
|
||
|
|
def _():
|
||
|
|
with tempfile.TemporaryDirectory() as tmpdir:
|
||
|
|
idx = os.path.join(tmpdir, "index.json")
|
||
|
|
with open(idx, "w") as f:
|
||
|
|
f.write("not json {{{")
|
||
|
|
signals = collect_knowledge_signals(tmpdir)
|
||
|
|
assert_true(any(s.signal_type == "corrupt_index" for s in signals))
|
||
|
|
|
||
|
|
@test("knowledge with facts passes")
|
||
|
|
def _():
|
||
|
|
with tempfile.TemporaryDirectory() as tmpdir:
|
||
|
|
idx = os.path.join(tmpdir, "index.json")
|
||
|
|
with open(idx, "w") as f:
|
||
|
|
json.dump({"facts": [
|
||
|
|
{"id": 1, "repo": "test", "status": "fresh"},
|
||
|
|
{"id": 2, "repo": "test", "status": "fresh"},
|
||
|
|
]}, f)
|
||
|
|
signals = collect_knowledge_signals(tmpdir)
|
||
|
|
# Should not generate missing_index or empty_knowledge
|
||
|
|
assert_false(any(s.signal_type in ("missing_index", "empty_knowledge") for s in signals))
|
||
|
|
|
||
|
|
|
||
|
|
# ============================================================
|
||
|
|
# Metrics Signal Collection Tests
|
||
|
|
# ============================================================
|
||
|
|
|
||
|
|
print("\n-- Metrics Signal Collection --")
|
||
|
|
|
||
|
|
@test("empty metrics dir generates signal")
|
||
|
|
def _():
|
||
|
|
with tempfile.TemporaryDirectory() as tmpdir:
|
||
|
|
signals = collect_metrics_signals(tmpdir)
|
||
|
|
assert_true(any(s.signal_type == "no_metrics" for s in signals))
|
||
|
|
|
||
|
|
@test("metrics with files passes")
|
||
|
|
def _():
|
||
|
|
with tempfile.TemporaryDirectory() as tmpdir:
|
||
|
|
# Create files (simulating real metrics dir with .gitkeep + actual files)
|
||
|
|
with open(os.path.join(tmpdir, ".gitkeep"), "w") as f:
|
||
|
|
f.write("")
|
||
|
|
with open(os.path.join(tmpdir, "report.json"), "w") as f:
|
||
|
|
f.write("{}")
|
||
|
|
signals = collect_metrics_signals(tmpdir)
|
||
|
|
assert_false(any(s.signal_type == "no_metrics" for s in signals))
|
||
|
|
|
||
|
|
|
||
|
|
# ============================================================
|
||
|
|
# Report Generation Tests
|
||
|
|
# ============================================================
|
||
|
|
|
||
|
|
print("\n-- Report Generation --")
|
||
|
|
|
||
|
|
@test("report has correct structure")
|
||
|
|
def _():
|
||
|
|
scores = [
|
||
|
|
IssueScore(1, "repo1", "Bug fix", ["bug"], None, "P1", 30.0, ["test"], 5, 3, None),
|
||
|
|
IssueScore(2, "repo1", "Feature", ["enhancement"], "P3", None, -5.0, ["test"], 60, 0, "alice"),
|
||
|
|
]
|
||
|
|
signals = [PipelineSignal("knowledge", "stale_knowledge", 0.5, "10 stale")]
|
||
|
|
report = generate_report(scores, signals, "test-org", ["repo1"])
|
||
|
|
|
||
|
|
assert_eq(report["org"], "test-org")
|
||
|
|
assert_eq(report["total_issues"], 2)
|
||
|
|
assert_true("generated_at" in report)
|
||
|
|
assert_true("summary" in report)
|
||
|
|
assert_true("top_priority" in report)
|
||
|
|
assert_eq(report["summary"]["suggested_new_priorities"], 1)
|
||
|
|
|
||
|
|
@test("markdown report is non-empty")
|
||
|
|
def _():
|
||
|
|
scores = [IssueScore(1, "repo1", "Test", ["bug"], None, "P2", 15.0, ["reason"], 5, 0, None)]
|
||
|
|
report = generate_report(scores, [], "test-org", ["repo1"])
|
||
|
|
md = generate_markdown_report(report)
|
||
|
|
assert_true(len(md) > 100)
|
||
|
|
assert_true("Priority Rebalancer Report" in md)
|
||
|
|
assert_true("Top 10" in md)
|
||
|
|
|
||
|
|
|
||
|
|
# ============================================================
|
||
|
|
# Summary
|
||
|
|
# ============================================================
|
||
|
|
|
||
|
|
print(f"\n=== Summary ===")
|
||
|
|
print(f"Total: {PASS + FAIL} | Passed: {PASS} | Failed: {FAIL}")
|
||
|
|
|
||
|
|
if FAIL > 0:
|
||
|
|
sys.exit(1)
|