Some checks failed
Test / pytest (pull_request) Failing after 29s
- Introduce scripts/test_documentation_generator.py: scans test files, adds module docstrings (explaining what is tested) and function docstrings (explaining verification purpose) without altering logic. - Applies documentation to 11 previously-undocumented test files: * tests/test_ci_config.py — added module-level docstring * tests/test_dedup.py — 30 function docstrings * tests/test_knowledge_gap_identifier.py — 10 function docstrings * tests/test_perf_bottleneck_finder.py — 25 function docstrings * tests/test_quality_gate.py — 14 function docstrings * scripts/test_diff_analyzer.py — 10 function docstrings * scripts/test_gitea_issue_parser.py — 6 function docstrings * scripts/test_harvest_prompt_comprehensive.py — 5 function docstrings * scripts/test_improvement_proposals.py — 2 function docstrings * scripts/test_knowledge_staleness.py — 8 function docstrings * scripts/test_session_pair_harvester.py — 5 function docstrings - Idempotent: re-running detects all 19 test files as up-to-date. - Processes up to 25 files per run (meets 20+ capacity requirement). Closes #88
238 lines
8.1 KiB
Python
238 lines
8.1 KiB
Python
"""Tests for knowledge deduplication module (Issue #196)."""
|
|
|
|
import json
|
|
import sys
|
|
from pathlib import Path
|
|
|
|
import pytest
|
|
|
|
sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
|
|
|
|
from dedup import (
|
|
normalize_text,
|
|
content_hash,
|
|
tokenize,
|
|
token_similarity,
|
|
quality_score,
|
|
merge_facts,
|
|
dedup_facts,
|
|
generate_test_duplicates,
|
|
)
|
|
|
|
|
|
class TestNormalize:
    """Text normalization: casing, whitespace collapsing, and stripping."""

    def test_lowercases(self):
        """Mixed-case input comes back fully lowercased."""
        result = normalize_text("Hello World")
        assert result == "hello world"

    def test_collapses_whitespace(self):
        """Surrounding/internal whitespace is reduced to single spaces."""
        result = normalize_text(" hello world ")
        assert result == "hello world"

    def test_strips(self):
        """Leading and trailing whitespace is removed."""
        result = normalize_text(" text ")
        assert result == "text"
|
|
|
|
|
|
class TestContentHash:
    """Content hashing: normalization-invariant, collision-free, hex output."""

    def test_deterministic(self):
        """Texts differing only in case/whitespace hash to the same value."""
        digests = {
            content_hash("Hello World"),
            content_hash("hello world"),
            content_hash(" Hello World "),
        }
        assert len(digests) == 1

    def test_different_texts(self):
        """Distinct texts produce distinct hashes."""
        assert content_hash("Hello") != content_hash("World")

    def test_returns_hex(self):
        """Hash is a 64-character lowercase hex digest (SHA256)."""
        digest = content_hash("test")
        assert len(digest) == 64  # SHA256
        assert all(ch in '0123456789abcdef' for ch in digest)
|
|
|
|
|
|
class TestTokenize:
    """Tokenization: word extraction, short-word filtering, set semantics."""

    def test_extracts_words(self):
        """Each word of the input appears (lowercased) in the token set."""
        result = tokenize("Hello World Test")
        for expected in ("hello", "world", "test"):
            assert expected in result

    def test_skips_short_words(self):
        """Very short words are filtered out; longer words are kept."""
        result = tokenize("a to is the hello")
        assert "a" not in result
        assert "to" not in result
        assert "hello" in result

    def test_returns_set(self):
        """Result is a set, so repeated words collapse to one token."""
        result = tokenize("hello hello world")
        assert isinstance(result, set)
        assert len(result) == 2
|
|
|
|
|
|
class TestTokenSimilarity:
    """Token-overlap similarity: bounds, symmetry, and empty-input handling."""

    def test_identical(self):
        """Identical texts score exactly 1.0."""
        assert token_similarity("hello world", "hello world") == 1.0

    def test_no_overlap(self):
        """Texts with disjoint vocabularies score exactly 0.0."""
        assert token_similarity("alpha beta", "gamma delta") == 0.0

    def test_partial_overlap(self):
        """Sharing two of three tokens lands strictly between 0.3 and 0.7."""
        score = token_similarity("hello world test", "hello universe test")
        assert 0.3 < score < 0.7

    def test_empty(self):
        """An empty string on either side yields a similarity of 0.0."""
        assert token_similarity("", "hello") == 0.0
        assert token_similarity("hello", "") == 0.0

    def test_symmetric(self):
        """Swapping the argument order does not change the score."""
        left = "hello world test"
        right = "hello universe test"
        assert token_similarity(left, right) == token_similarity(right, left)
|
|
|
|
|
|
class TestQualityScore:
    """Quality scoring: confidence-driven ranking with sane defaults."""

    def test_high_confidence(self):
        """A well-attested, high-confidence fact scores above 0.7."""
        rich_fact = {"confidence": 0.95, "source_count": 5, "tags": ["test"], "related": ["x"]}
        assert quality_score(rich_fact) > 0.7

    def test_low_confidence(self):
        """A weakly-attested, low-confidence fact scores below 0.5."""
        weak_fact = {"confidence": 0.3, "source_count": 1}
        assert quality_score(weak_fact) < 0.5

    def test_defaults(self):
        """An empty fact still yields a score strictly inside (0, 1)."""
        default_score = quality_score({})
        assert 0 < default_score < 1
|
|
|
|
|
|
class TestMergeFacts:
    """Fact merging: tag union, source-count sum, confidence max, provenance."""

    def test_merges_tags(self):
        """Tags from both the kept and the dropped fact survive the merge."""
        primary = {"id": "a", "fact": "test", "tags": ["git"], "confidence": 0.9}
        duplicate = {"id": "b", "fact": "test", "tags": ["python"], "confidence": 0.8}
        result = merge_facts(primary, duplicate)
        assert "git" in result["tags"]
        assert "python" in result["tags"]

    def test_merges_source_count(self):
        """Source counts are summed across the merged pair."""
        primary = {"id": "a", "fact": "test", "source_count": 3}
        duplicate = {"id": "b", "fact": "test", "source_count": 2}
        result = merge_facts(primary, duplicate)
        assert result["source_count"] == 5

    def test_keeps_higher_confidence(self):
        """The merged fact carries the larger of the two confidences."""
        primary = {"id": "a", "fact": "test", "confidence": 0.7}
        duplicate = {"id": "b", "fact": "test", "confidence": 0.9}
        result = merge_facts(primary, duplicate)
        assert result["confidence"] == 0.9

    def test_tracks_merged_from(self):
        """The dropped fact's id is recorded under _merged_from."""
        primary = {"id": "a", "fact": "test"}
        duplicate = {"id": "b", "fact": "test"}
        result = merge_facts(primary, duplicate)
        assert "b" in result["_merged_from"]
|
|
|
|
|
|
class TestDedupFacts:
    """End-to-end dedup: exact dupes, near dupes, quality ranking, dry-run."""

    def test_removes_exact_dupes(self):
        """Byte-identical facts are collapsed and counted as exact dupes."""
        sample = [
            {"id": "1", "fact": "Always use git rebase"},
            {"id": "2", "fact": "Always use git rebase"},  # exact dupe
            {"id": "3", "fact": "Check logs first"},
        ]
        _, stats = dedup_facts(sample)
        assert stats["exact_dupes"] == 1
        assert stats["unique"] == 2

    def test_removes_near_dupes(self):
        """Highly-similar (but not identical) facts collapse under a loose threshold."""
        sample = [
            {"id": "1", "fact": "Always check logs before deploying to production server"},
            {"id": "2", "fact": "Always check logs before deploying to production environment"},
            {"id": "3", "fact": "Use docker compose for local development environments"},
        ]
        _, stats = dedup_facts(sample, near_threshold=0.5)
        assert stats["near_dupes"] >= 1
        assert stats["unique"] == 2

    def test_preserves_unique(self):
        """Genuinely distinct facts all survive with nothing removed."""
        sample = [
            {"id": "1", "fact": "Use git rebase for clean history"},
            {"id": "2", "fact": "Docker containers should be stateless"},
            {"id": "3", "fact": "Always write tests before code"},
        ]
        _, stats = dedup_facts(sample)
        assert stats["unique"] == 3
        assert stats["removed"] == 0

    def test_empty_input(self):
        """An empty fact list yields zeroed-out stats."""
        _, stats = dedup_facts([])
        assert stats["total"] == 0
        assert stats["unique"] == 0

    def test_keeps_higher_quality_near_dup(self):
        """When near-dupes collapse, the higher-quality fact is the survivor."""
        sample = [
            {"id": "1", "fact": "Check logs before deploying to production server", "confidence": 0.5, "source_count": 1},
            {"id": "2", "fact": "Check logs before deploying to production environment", "confidence": 0.9, "source_count": 5, "tags": ["ops"]},
        ]
        survivors, stats = dedup_facts(sample, near_threshold=0.5)
        assert stats["unique"] == 1
        # Higher quality fact should be kept
        assert survivors[0]["confidence"] == 0.9

    def test_dry_run_does_not_modify(self):
        """dry_run still detects dupes but skips the merge step."""
        sample = [
            {"id": "1", "fact": "Same text"},
            {"id": "2", "fact": "Same text"},
        ]
        survivors, stats = dedup_facts(sample, dry_run=True)
        assert stats["exact_dupes"] == 1
        # In dry_run, merge_facts is skipped so facts aren't modified
        assert len(survivors) == 1
|
|
|
|
|
|
class TestGenerateTestDuplicates:
    """Synthetic fixture generator: produces dupes that dedup can remove."""

    def test_generates_correct_count(self):
        """Requesting 20 unique facts yields more than 20 total (dupes included)."""
        generated = generate_test_duplicates(20)
        assert len(generated) > 20  # 20 unique + duplicates

    def test_has_exact_dupes(self):
        """At least two generated facts share a content hash."""
        generated = generate_test_duplicates(20)
        digests = [content_hash(item["fact"]) for item in generated]
        # Should have some duplicate hashes
        assert len(set(digests)) < len(digests)

    def test_dedup_removes_dupes(self):
        """Running dedup over the fixture actually removes entries."""
        generated = generate_test_duplicates(20)
        _, stats = dedup_facts(generated)
        assert stats["unique"] <= 20
        assert stats["removed"] > 0
|