#!/usr/bin/env python3
|
|
"""
|
|
Tests for PR Complexity Scorer — unit tests for the scoring logic.
|
|
"""
|
|
|
|
import sys
|
|
from pathlib import Path
|
|
|
|
sys.path.insert(0, str(Path(__file__).parent))
|
|
|
|
from pr_complexity_scorer import (
|
|
score_pr,
|
|
is_dependency_file,
|
|
is_test_file,
|
|
TIME_PER_POINT,
|
|
SMALL_FILES,
|
|
MEDIUM_FILES,
|
|
LARGE_FILES,
|
|
SMALL_LINES,
|
|
MEDIUM_LINES,
|
|
LARGE_LINES,
|
|
)
|
|
|
|
PASS = 0
|
|
FAIL = 0
|
|
|
|
def test(name):
|
|
def decorator(fn):
|
|
global PASS, FAIL
|
|
try:
|
|
fn()
|
|
PASS += 1
|
|
print(f" [PASS] {name}")
|
|
except AssertionError as e:
|
|
FAIL += 1
|
|
print(f" [FAIL] {name}: {e}")
|
|
except Exception as e:
|
|
FAIL += 1
|
|
print(f" [FAIL] {name}: Unexpected error: {e}")
|
|
return decorator
|
|
|
|
def assert_eq(a, b, msg=""):
    """Raise AssertionError with a descriptive message unless a == b."""
    if a == b:
        return
    raise AssertionError(f"{msg} expected {b!r}, got {a!r}")
|
|
|
|
def assert_true(v, msg=""):
    """Raise AssertionError (with msg, or a default text) when v is falsy."""
    if v:
        return
    raise AssertionError(msg if msg else "Expected True")
|
|
|
|
def assert_false(v, msg=""):
    """Raise AssertionError (with msg, or a default text) when v is truthy."""
    if not v:
        return
    raise AssertionError(msg if msg else "Expected False")
|
|
|
|
|
|
# Title banner plus the first section header (same bytes as two prints).
print("=== PR Complexity Scorer Tests ===\n\n-- File Classification --")
|
|
|
|
@test("dependency file detection — requirements.txt")
def _():
    # The exact manifest name matches at any path depth; near-miss names do not.
    for path in ("requirements.txt", "src/requirements.txt"):
        assert_true(is_dependency_file(path))
    assert_false(is_dependency_file("requirements_test.txt"))
|
|
|
|
@test("dependency file detection — pyproject.toml")
def _():
    # The manifest itself qualifies; a similarly named source module does not.
    for path, expected in (("pyproject.toml", True), ("myproject.py", False)):
        if expected:
            assert_true(is_dependency_file(path))
        else:
            assert_false(is_dependency_file(path))
|
|
|
|
@test("test file detection — pytest style")
def _():
    # Both the test_* prefix and the *_test suffix count, in any directory.
    for path in ("tests/test_api.py", "test_module.py", "src/module_test.py"):
        assert_true(is_test_file(path))
|
|
|
|
@test("test file detection — other frameworks")
def _():
    # RSpec and Jest naming conventions are recognized; a fixtures
    # directory whose name merely starts with "test" is not.
    for path in ("spec/feature_spec.rb", "__tests__/component.test.js"):
        assert_true(is_test_file(path))
    assert_false(is_test_file("testfixtures/helper.py"))
|
|
|
|
|
|
# Section banner: scoring-logic tests follow.
print("\n-- Scoring Logic --")
|
|
|
|
@test("small PR gets low score (1-3)")
def _():
    # A tiny diff (few files, <100 changed lines) should land at the
    # bottom of the scale with a short review-time estimate.
    score, minutes, _breakdown = score_pr(
        files_changed=3, additions=50, deletions=10,
        has_dependency_changes=False, test_coverage_delta=None,
    )
    assert_true(1 <= score <= 3, f"Score should be low, got {score}")
    assert_true(minutes < 20)
|
|
|
|
@test("medium PR gets medium score (4-6)")
def _():
    # A mid-sized diff should land in the middle band with a
    # 20-45 minute review estimate.
    score, minutes, _breakdown = score_pr(
        files_changed=15, additions=400, deletions=100,
        has_dependency_changes=False, test_coverage_delta=None,
    )
    assert_true(4 <= score <= 6, f"Score should be medium, got {score}")
    assert_true(20 <= minutes <= 45)
|
|
|
|
@test("large PR gets high score (7-9)")
def _():
    # A big diff plus dependency churn: high band, at least 45 minutes.
    score, minutes, _breakdown = score_pr(
        files_changed=60, additions=3000, deletions=1500,
        has_dependency_changes=True, test_coverage_delta=None,
    )
    assert_true(7 <= score <= 9, f"Score should be high, got {score}")
    assert_true(minutes >= 45)
|
|
|
|
@test("dependency changes boost score")
def _():
    # Identical diffs except for the dependency flag: flagged must score higher.
    common = dict(files_changed=10, additions=200, deletions=50,
                  test_coverage_delta=None)
    base_score, _m, _b = score_pr(has_dependency_changes=False, **common)
    dep_score, _m, _b = score_pr(has_dependency_changes=True, **common)
    assert_true(dep_score > base_score, f"Deps: {base_score} -> {dep_score}")
|
|
|
|
@test("adding tests lowers complexity")
def _():
    # A positive coverage delta should outweigh the extra added lines.
    base_score, _m, _b = score_pr(
        files_changed=8, additions=150, deletions=20,
        has_dependency_changes=False, test_coverage_delta=None,
    )
    better_score, _m, _b = score_pr(
        files_changed=8, additions=180, deletions=20,
        has_dependency_changes=False, test_coverage_delta=3,
    )
    assert_true(better_score < base_score, f"Tests: {base_score} -> {better_score}")
|
|
|
|
@test("removing tests increases complexity")
def _():
    # Same diff, but a negative coverage delta must push the score up.
    base_score, _m, _b = score_pr(
        files_changed=8, additions=150, deletions=20,
        has_dependency_changes=False, test_coverage_delta=None,
    )
    worse_score, _m, _b = score_pr(
        files_changed=8, additions=150, deletions=20,
        has_dependency_changes=False, test_coverage_delta=-2,
    )
    assert_true(worse_score > base_score, f"Remove tests: {base_score} -> {worse_score}")
|
|
|
|
@test("score bounded 1-10")
def _():
    # Extremes at both ends must stay clamped to the 1..10 range.
    extremes = [(1, 10, 5), (100, 10000, 5000)]
    for files, adds, dels in extremes:
        score, _m, _b = score_pr(files, adds, dels, False, None)
        assert_true(1 <= score <= 10, f"Score {score} out of range")
|
|
|
|
@test("estimated minutes exist for all scores")
def _():
    # Every integer score 1..10 needs a time-estimate entry in the table.
    for score in range(1, 11):
        assert_true(score in TIME_PER_POINT, f"Missing time for score {score}")
|
|
|
|
|
|
# Final tally; exit nonzero when any test failed (for CI consumption).
print(f"\n=== Results: {PASS} passed, {FAIL} failed ===")
sys.exit(1 if FAIL else 0)
|