Files
compounding-intelligence/tests/test_quality_gate.py
StepFun fe517158a0
Some checks failed
Test / pytest (pull_request) Failing after 29s
feat: add test documentation generator (#88)
- Introduce scripts/test_documentation_generator.py: scans test files,
  adds module docstrings (explaining what is tested) and function
  docstrings (explaining verification purpose) without altering logic.
- Applies documentation to 11 previously-undocumented test files:
  * tests/test_ci_config.py — added module-level docstring
  * tests/test_dedup.py — 30 function docstrings
  * tests/test_knowledge_gap_identifier.py — 10 function docstrings
  * tests/test_perf_bottleneck_finder.py — 25 function docstrings
  * tests/test_quality_gate.py — 14 function docstrings
  * scripts/test_diff_analyzer.py — 10 function docstrings
  * scripts/test_gitea_issue_parser.py — 6 function docstrings
  * scripts/test_harvest_prompt_comprehensive.py — 5 function docstrings
  * scripts/test_improvement_proposals.py — 2 function docstrings
  * scripts/test_knowledge_staleness.py — 8 function docstrings
  * scripts/test_session_pair_harvester.py — 5 function docstrings
- Idempotent: re-running detects all 19 test files as up-to-date.
- Processes up to 25 files per run (meets 20+ capacity requirement).

Closes #88
2026-04-25 20:58:00 -04:00

123 lines
4.3 KiB
Python

"""
Tests for quality_gate.py — Knowledge entry quality scoring.
"""
import unittest
from datetime import datetime, timezone, timedelta
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent))
from quality_gate import (
score_specificity,
score_actionability,
score_freshness,
score_source_quality,
score_entry,
filter_entries,
)
class TestScoreSpecificity(unittest.TestCase):
    """Exercises score_specificity() on specific, vague, and empty content."""

    def test_specific_content_scores_high(self):
        """Content with concrete commands, dates, and examples should exceed 0.6."""
        detailed = (
            "Run `python3 deploy.py --env prod` on 2026-04-15. "
            "Example: step 1 configure nginx."
        )
        self.assertGreater(score_specificity(detailed), 0.6)

    def test_vague_content_scores_low(self):
        """Hedging language with no concrete detail should fall below 0.5."""
        vague = "It generally depends. Various factors might affect this. Basically, it varies."
        self.assertLess(score_specificity(vague), 0.5)

    def test_empty_scores_baseline(self):
        """An empty string lands near the 0.5 neutral baseline (within 0.1)."""
        self.assertAlmostEqual(score_specificity(""), 0.5, delta=0.1)
class TestScoreActionability(unittest.TestCase):
    """Exercises score_actionability() on step-by-step vs. abstract content."""

    def test_actionable_content_scores_high(self):
        """Numbered, command-oriented steps should exceed 0.6."""
        steps = "1. Run `pip install -r requirements.txt`\n2. Execute `python3 train.py`\n3. Verify with `pytest`"
        self.assertGreater(score_actionability(steps), 0.6)

    def test_abstract_content_scores_low(self):
        """Purely conceptual prose with no actions should fall below 0.5."""
        musing = "The concept of intelligence is fascinating and multifaceted."
        self.assertLess(score_actionability(musing), 0.5)
class TestScoreFreshness(unittest.TestCase):
    """Exercises score_freshness() on recent, stale, and missing timestamps."""

    def test_recent_timestamp_scores_high(self):
        """A just-now UTC timestamp should score above 0.9."""
        now_iso = datetime.now(timezone.utc).isoformat()
        self.assertGreater(score_freshness(now_iso), 0.9)

    def test_old_timestamp_scores_low(self):
        """A one-year-old timestamp should decay below 0.2."""
        year_ago = datetime.now(timezone.utc) - timedelta(days=365)
        self.assertLess(score_freshness(year_ago.isoformat()), 0.2)

    def test_none_returns_baseline(self):
        """A missing timestamp (None) maps to the exact 0.5 baseline."""
        self.assertEqual(score_freshness(None), 0.5)
class TestScoreSourceQuality(unittest.TestCase):
    """Exercises score_source_quality() across model-name tiers."""

    def test_claude_scores_high(self):
        """A claude-family model name should rate above 0.85."""
        rating = score_source_quality("claude-sonnet")
        self.assertGreater(rating, 0.85)

    def test_ollama_scores_lower(self):
        """An ollama source should rate below 0.7."""
        rating = score_source_quality("ollama")
        self.assertLess(rating, 0.7)

    def test_unknown_returns_default(self):
        """An unrecognized source name falls back to exactly 0.5."""
        self.assertEqual(score_source_quality("unknown"), 0.5)
class TestScoreEntry(unittest.TestCase):
    """Exercises the composite score_entry() over whole knowledge entries."""

    def test_good_entry_scores_high(self):
        """Specific, actionable, fresh, well-sourced entry should exceed 0.6."""
        strong_entry = {
            "content": "To deploy: run `kubectl apply -f deployment.yaml`. Verify with `kubectl get pods`.",
            "model": "claude-sonnet",
            "timestamp": datetime.now(timezone.utc).isoformat(),
        }
        self.assertGreater(score_entry(strong_entry), 0.6)

    def test_poor_entry_scores_low(self):
        """Vague content from an unknown model (no timestamp) stays below 0.5."""
        weak_entry = {
            "content": "It depends. Various things might happen.",
            "model": "unknown",
        }
        self.assertLess(score_entry(weak_entry), 0.5)
class TestFilterEntries(unittest.TestCase):
    """Exercises filter_entries() thresholding over a mixed-quality batch."""

    def test_filters_low_quality(self):
        """With threshold=0.5, at least the two concrete entries survive."""
        batch = [
            {"content": "Run `deploy.py` to fix the issue.", "model": "claude"},
            {"content": "It might work sometimes.", "model": "unknown"},
            {"content": "Configure nginx: step 1 edit nginx.conf", "model": "gpt-4"},
        ]
        kept = filter_entries(batch, threshold=0.5)
        self.assertGreaterEqual(len(kept), 2)
# Allow running this test module directly via `python test_quality_gate.py`.
if __name__ == "__main__":
    unittest.main()