#!/usr/bin/env python3
|
|
"""Comprehensive tests for knowledge extraction prompt."""
|
|
import json, re
|
|
from pathlib import Path
|
|
|
|
def check_prompt_structure():
    """Validate the harvest prompt template's overall structure.

    Checks that the template file exists, contains all required sections
    and knowledge categories, and stays within a usable size window.

    Returns:
        tuple[bool, str]: (passed, human-readable message).
    """
    path = Path("templates/harvest-prompt.md")
    if not path.exists():
        return False, "harvest-prompt.md not found"

    content = path.read_text()
    lowered = content.lower()  # hoisted: avoid re-lowering on every iteration

    sections = [
        "System Prompt",
        "Instructions",
        "Categories",
        "Output Format",
        "Confidence Scoring",
        "Constraints",
        "Example",
    ]
    # Section headings are matched case-insensitively.
    for section in sections:
        if section.lower() not in lowered:
            return False, f"Missing section: {section}"

    # Category names are matched case-sensitively: they must appear exactly
    # as the extraction output is expected to emit them.
    for category in ["fact", "pitfall", "pattern", "tool-quirk", "question"]:
        if category not in content:
            return False, f"Missing category: {category}"

    # Keep the prompt within a practical size window.
    if len(content) > 5000:
        return False, f"Too large: {len(content)}"
    if len(content) < 1000:
        return False, f"Too small: {len(content)}"

    return True, "Prompt structure is valid"


def check_confidence_scoring():
    """Check that every confidence band is documented in the prompt.

    Returns:
        tuple[bool, str]: (passed, human-readable message).
    """
    path = Path("templates/harvest-prompt.md")
    # Guard the missing-file case instead of raising FileNotFoundError,
    # for consistency with check_prompt_structure.
    if not path.exists():
        return False, "harvest-prompt.md not found"

    content = path.read_text()
    for level in ["0.9-1.0", "0.7-0.8", "0.5-0.6", "0.3-0.4", "0.1-0.2"]:
        if level not in content:
            return False, f"Missing level: {level}"
    return True, "Confidence scoring defined"


def check_example_quality():
    """Check that the prompt contains a JSON example of the output format.

    Returns:
        tuple[bool, str]: (passed, human-readable message).
    """
    path = Path("templates/harvest-prompt.md")
    # Guard the missing-file case instead of raising FileNotFoundError,
    # for consistency with check_prompt_structure.
    if not path.exists():
        return False, "harvest-prompt.md not found"

    content = path.read_text()
    # Hoisted: the original computed content.lower() twice.
    start = content.lower().find("example")
    if start == -1:
        return False, "No examples"

    # A usable example must show the "knowledge" key of the JSON output.
    if re.search(r'"knowledge"', content[start:]) is None:
        return False, "No JSON example"
    return True, "Examples present"


def check_constraint_coverage():
    """Check that the prompt states the key extraction constraints.

    Returns:
        tuple[bool, str]: (passed, human-readable message).
    """
    path = Path("templates/harvest-prompt.md")
    # Guard the missing-file case instead of raising FileNotFoundError,
    # for consistency with check_prompt_structure.
    if not path.exists():
        return False, "harvest-prompt.md not found"

    lowered = path.read_text().lower()
    # Constraint phrases are matched case-insensitively.
    for phrase in ["no hallucination", "explicitly", "partial", "failed sessions"]:
        if phrase not in lowered:
            return False, f"Missing: {phrase}"
    return True, "Constraints covered"


def check_test_sessions():
    """Verify that test_sessions/ holds at least five JSONL session files
    and that every line of every file parses as JSON.

    Returns:
        tuple[bool, str]: (passed, human-readable message).
    """
    session_dir = Path("test_sessions")
    if not session_dir.exists():
        return False, "test_sessions/ not found"

    session_files = list(session_dir.glob("*.jsonl"))
    if len(session_files) < 5:
        return False, f"Only {len(session_files)} sessions"

    for session in session_files:
        raw_lines = session.read_text().strip().split("\n")
        for lineno, raw in enumerate(raw_lines, 1):
            try:
                json.loads(raw)
            except json.JSONDecodeError as err:
                return False, f"{session.name}:{lineno}: {err}"

    return True, f"{len(session_files)} valid sessions"


def test_prompt_structure():
    """Pytest entry point for the prompt-structure check."""
    ok, detail = check_prompt_structure()
    assert ok, detail


def test_confidence_scoring():
    """Pytest entry point for the confidence-scoring check."""
    ok, detail = check_confidence_scoring()
    assert ok, detail


def test_example_quality():
    """Pytest entry point for the example-quality check."""
    ok, detail = check_example_quality()
    assert ok, detail


def test_constraint_coverage():
    """Pytest entry point for the constraint-coverage check."""
    ok, detail = check_constraint_coverage()
    assert ok, detail


def test_test_sessions():
    """Pytest entry point for the test-session validation check."""
    ok, detail = check_test_sessions()
    assert ok, detail


if __name__ == "__main__":
    # Standalone mode: run each check and print one PASS/FAIL line per check.
    checks = [
        check_prompt_structure,
        check_confidence_scoring,
        check_example_quality,
        check_constraint_coverage,
        check_test_sessions,
    ]
    for check in checks:
        ok, msg = check()
        status = "PASS" if ok else "FAIL"
        print(f"{status}: {check.__name__} -- {msg}")