Compare commits
1 commit: step35/91-...step35/161
Commit SHA1: 44607f8484

This compare adds a dependency staleness checker (scripts/dependency_freshness.py) with a matching test suite, and reworks scripts/session_pair_harvester.py so that pair extraction operates on JSONL session objects (extract_pairs_from_session) rather than normalized conversations (extract_pairs_from_conversation); the old conversation-based tests are removed.

scripts/dependency_freshness.py (new file, 271 lines)
@@ -0,0 +1,271 @@
#!/usr/bin/env python3
"""dependency_freshness.py - Compare installed dependencies against latest PyPI versions.

Identify packages that are more than 2 major versions behind.
Outputs a human-readable report by default or JSON with --json flag.
"""

import argparse
import json
import subprocess
import sys
from packaging import version
from typing import Dict, List, Tuple


def parse_requirements(requirements_path: str) -> List[str]:
    """Parse package names from a requirements.txt file."""
    packages = []
    try:
        with open(requirements_path, 'r') as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                pkg_name = line
                for delim in ['[', '>', '<', '=', '!', ';', '@']:
                    if delim in pkg_name:
                        pkg_name = pkg_name.split(delim)[0]
                pkg_name = pkg_name.strip()
                if pkg_name:
                    packages.append(pkg_name.lower())
    except FileNotFoundError:
        print(f"Warning: requirements file not found: {requirements_path}", file=sys.stderr)
    return packages


def get_installed_packages() -> Dict[str, str]:
    """Get all installed packages via pip list --format=json."""
    try:
        result = subprocess.run(
            [sys.executable, '-m', 'pip', 'list', '--format=json'],
            capture_output=True, text=True, check=True
        )
        packages = json.loads(result.stdout)
        return {pkg['name'].lower(): pkg['version'] for pkg in packages}
    except subprocess.CalledProcessError as e:
        print(f"Error running pip list: {e}", file=sys.stderr)
        sys.exit(1)
    except json.JSONDecodeError as e:
        print(f"Error parsing pip output: {e}", file=sys.stderr)
        sys.exit(1)


def get_outdated_packages() -> Dict[str, dict]:
    """Get outdated packages via pip list --outdated --format=json."""
    try:
        result = subprocess.run(
            [sys.executable, '-m', 'pip', 'list', '--outdated', '--format=json'],
            capture_output=True, text=True, check=True
        )
        outdated_list = json.loads(result.stdout)
        outdated = {}
        for pkg in outdated_list:
            name = pkg['name'].lower()
            outdated[name] = {
                'installed': pkg.get('version', ''),
                'latest': pkg.get('latest_version', ''),
                'latest_filetype': pkg.get('latest_filetype', '')
            }
        return outdated
    except subprocess.CalledProcessError as e:
        print(f"Error running pip list --outdated: {e}", file=sys.stderr)
        sys.exit(1)
    except json.JSONDecodeError as e:
        print(f"Error parsing pip outdated output: {e}", file=sys.stderr)
        sys.exit(1)


def get_major_version(v: str) -> int:
    """Extract major version number from a version string."""
    try:
        parsed = version.parse(v)
        if hasattr(parsed, 'major'):
            return int(parsed.major)
        parts = str(v).split('.')
        if parts:
            return int(parts[0])
    except Exception:
        pass
    return 0


def is_more_than_two_majors_behind(installed_ver: str, latest_ver: str) -> bool:
    """Check if installed version is more than 2 major versions behind latest."""
    try:
        installed_major = get_major_version(installed_ver)
        latest_major = get_major_version(latest_ver)
        return (latest_major - installed_major) > 2
    except Exception:
        return False


def analyze_dependencies(
    required_packages: List[str],
    installed_packages: Dict[str, str],
    outdated_packages: Dict[str, dict]
) -> Tuple[List[dict], List[str], List[dict]]:
    """Analyze dependency freshness."""
    very_outdated = []
    missing = []
    outdated_but_not_critical = []

    for pkg in required_packages:
        if pkg not in installed_packages:
            missing.append(pkg)
            continue

        installed_ver = installed_packages[pkg]
        if pkg not in outdated_packages:
            continue

        latest_ver = outdated_packages[pkg]['latest']
        if is_more_than_two_majors_behind(installed_ver, latest_ver):
            very_outdated.append({
                'package': pkg,
                'installed': installed_ver,
                'latest': latest_ver,
                'major_diff': get_major_version(latest_ver) - get_major_version(installed_ver)
            })
        else:
            outdated_but_not_critical.append({
                'package': pkg,
                'installed': installed_ver,
                'latest': latest_ver,
                'major_diff': get_major_version(latest_ver) - get_major_version(installed_ver)
            })

    return very_outdated, missing, outdated_but_not_critical


def generate_human_report(
    very_outdated: List[dict],
    missing: List[str],
    outdated_but_not_critical: List[dict],
    requirements_path: str
) -> str:
    """Generate a human-readable staleness report."""
    lines = []
    lines.append("=" * 60)
    lines.append("DEPENDENCY FRESHNESS REPORT")
    lines.append("=" * 60)
    lines.append(f"Requirements file: {requirements_path}")
    total = len(very_outdated) + len(missing) + len(outdated_but_not_critical)
    lines.append(f"Total dependencies checked: {total}")
    lines.append(f"Very outdated (>2 major versions behind): {len(very_outdated)}")
    lines.append(f"Outdated but within 2 major versions: {len(outdated_but_not_critical)}")
    lines.append(f"Missing (not installed): {len(missing)}")
    lines.append("")

    if very_outdated:
        lines.append("!!! VERY OUTDATED PACKAGES (consider updating):")
        lines.append("-" * 60)
        for pkg_info in very_outdated:
            lines.append(f"  {pkg_info['package']}")
            lines.append(f"    Installed: {pkg_info['installed']}")
            lines.append(f"    Latest: {pkg_info['latest']}")
            lines.append(f"    Major diff: {pkg_info['major_diff']}")
        lines.append("")
    else:
        lines.append("✓ No packages more than 2 major versions behind.")
        lines.append("")

    if outdated_but_not_critical:
        lines.append("Outdated packages (within 2 major versions):")
        lines.append("-" * 60)
        for pkg_info in outdated_but_not_critical:
            lines.append(f"  {pkg_info['package']}: {pkg_info['installed']} -> {pkg_info['latest']} (major diff: {pkg_info['major_diff']})")
        lines.append("")

    if missing:
        lines.append("Missing packages (not installed):")
        lines.append("-" * 60)
        for pkg in missing:
            lines.append(f"  {pkg}")
        lines.append("")

    lines.append("=" * 60)
    lines.append("For full details, run: python3 -m pip list --outdated")
    lines.append("=" * 60)

    return "\n".join(lines)


def generate_json_report(
    very_outdated: List[dict],
    missing: List[str],
    outdated_but_not_critical: List[dict],
    requirements_path: str
) -> str:
    """Generate a JSON staleness report."""
    report = {
        'requirements_file': requirements_path,
        'summary': {
            'total_dependencies': len(very_outdated) + len(missing) + len(outdated_but_not_critical),
            'very_outdated_count': len(very_outdated),
            'outdated_within_threshold_count': len(outdated_but_not_critical),
            'missing_count': len(missing)
        },
        'very_outdated': very_outdated,
        'outdated_within_threshold': outdated_but_not_critical,
        'missing': missing
    }
    return json.dumps(report, indent=2)


def main():
    parser = argparse.ArgumentParser(
        description='Check dependency freshness against PyPI latest versions.'
    )
    parser.add_argument(
        '--requirements', '-r',
        default='requirements.txt',
        help='Path to requirements.txt file (default: requirements.txt)'
    )
    parser.add_argument(
        '--json',
        action='store_true',
        help='Output report as JSON instead of human-readable text'
    )
    parser.add_argument(
        '--output', '-o',
        help='Optional output file for the report (default: stdout)'
    )

    args = parser.parse_args()

    # Parse requirements
    required_packages = parse_requirements(args.requirements)
    if not required_packages:
        print("No packages found in requirements file.", file=sys.stderr)
        sys.exit(1)

    # Get installed and outdated package data
    installed_packages = get_installed_packages()
    outdated_packages = get_outdated_packages()

    # Analyze dependencies
    very_outdated, missing, outdated_but_not_critical = analyze_dependencies(
        required_packages, installed_packages, outdated_packages
    )

    # Generate report
    if args.json:
        report = generate_json_report(very_outdated, missing, outdated_but_not_critical, args.requirements)
    else:
        report = generate_human_report(very_outdated, missing, outdated_but_not_critical, args.requirements)

    # Output report
    if args.output:
        with open(args.output, 'w') as f:
            f.write(report + '\n')
    else:
        print(report)

    # Exit code: 0 if no very outdated deps, 1 otherwise
    exit_code = 1 if very_outdated else 0
    sys.exit(exit_code)


if __name__ == '__main__':
    main()
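The checker is CI-friendly: it exits 0 when nothing is more than two major versions behind and 1 otherwise. As a minimal sketch of consuming the --json output (the script path and working directory are assumptions for illustration), a caller might do:

import json
import subprocess
import sys

# Run the checker in JSON mode; the path assumes it lives in scripts/
# and that we run from the repository root.
proc = subprocess.run(
    [sys.executable, "scripts/dependency_freshness.py", "--json"],
    capture_output=True, text=True,
)
report = json.loads(proc.stdout)
for pkg in report["very_outdated"]:
    print(f"{pkg['package']}: {pkg['installed']} -> {pkg['latest']}")
# A return code of 1 signals at least one very outdated dependency.
sys.exit(proc.returncode)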
@@ -22,95 +22,114 @@ import sys
 from pathlib import Path
 from typing import Optional
 
-from session_reader import extract_conversation, read_session
 
 
 def compute_hash(text: str) -> str:
     """Content hash for deduplication."""
     return hashlib.sha256(text.encode()).hexdigest()[:16]
 
 
-def extract_pairs_from_conversation(conversation: list, session_id: str, model: str,
-                                    min_ratio: float = 1.5,
+def extract_pairs_from_session(session_data: dict, min_ratio: float = 1.5,
                                min_response_words: int = 20) -> list:
-    """Extract terse→rich pairs from a normalized conversation."""
+    """Extract terse→rich pairs from a single session object."""
     pairs = []
+    conversations = session_data.get("conversations", [])
+    session_id = session_data.get("id", "unknown")
+    model = session_data.get("model", "unknown")
 
     seen_hashes = set()
 
-    for i, msg in enumerate(conversation):
-        # Look for assistant responses
-        if msg.get('role') != 'assistant':
+    for i, msg in enumerate(conversations):
+        # Look for assistant/gpt responses
+        if msg.get("from") not in ("gpt", "assistant"):
             continue
 
-        response_text = msg.get('content', '')
+        response_text = msg.get("value", "")
         if not response_text or len(response_text.split()) < min_response_words:
             continue
 
-        # Find the preceding user message
+        # Find the preceding human message
         prompt_text = ""
         for j in range(i - 1, -1, -1):
-            if conversation[j].get('role') == 'user':
-                prompt_text = conversation[j].get('content', '')
+            if conversations[j].get("from") == "human":
+                prompt_text = conversations[j].get("value", "")
                 break
 
         if not prompt_text:
             continue
 
         # Filter: skip tool results, system messages embedded as human
-        if prompt_text.startswith('{') and 'output' in prompt_text[:100]:
-            continue
-        if prompt_text.startswith('# SOUL.md') or prompt_text.startswith('You are'):
-            continue
+        if prompt_text.startswith("{") and "output" in prompt_text[:100]:
+            continue  # likely a tool result
+        if prompt_text.startswith("# SOUL.md") or prompt_text.startswith("You are"):
+            continue  # system prompt leak
 
         # Quality filters
         prompt_words = len(prompt_text.split())
         response_words = len(response_text.split())
 
         # Must have meaningful length ratio
         if prompt_words == 0 or response_words == 0:
             continue
         ratio = response_words / prompt_words
         if ratio < min_ratio:
             continue
 
-        code_blocks = response_text.count('```')
-        if code_blocks >= 4 and len(response_text.replace('```', '').strip()) < 50:
+        # Skip responses that are mostly code
+        code_blocks = response_text.count("```")
+        if code_blocks >= 4 and len(response_text.replace("```", "").strip()) < 50:
            continue
 
-        if 'tool_call' in response_text[:100] or 'function_call' in response_text[:100]:
+        # Skip responses with tool call artifacts
+        if "tool_call" in response_text[:100] or "function_call" in response_text[:100]:
             continue
 
         # Deduplicate by content hash
         content_hash = compute_hash(prompt_text + response_text[:200])
         if content_hash in seen_hashes:
             continue
         seen_hashes.add(content_hash)
 
         # Clean up response: remove markdown headers if too many
         clean_response = response_text
 
         pairs.append({
-            'terse': prompt_text.strip(),
-            'rich': clean_response.strip(),
-            'source': session_id,
-            'model': model,
-            'prompt_words': prompt_words,
-            'response_words': response_words,
-            'ratio': round(ratio, 2),
+            "terse": prompt_text.strip(),
+            "rich": clean_response.strip(),
+            "source": session_id,
+            "model": model,
+            "prompt_words": prompt_words,
+            "response_words": response_words,
+            "ratio": round(ratio, 2),
         })
 
     return pairs
 
 
+def extract_from_jsonl_file(filepath: str, **kwargs) -> list:
+    """Extract pairs from a session JSONL file."""
+    pairs = []
+    path = Path(filepath)
 
-def extract_from_jsonl_file(path: str, **kwargs) -> list:
-    """Read a session file and extract training pairs using normalized conversation."""
-    session_messages = read_session(path)
-    if not session_messages:
-        return []
-    conversation = extract_conversation(session_messages)
-    # Derive session_id and model from first real message metadata
-    first_msg = next((m for m in session_messages if m.get('role') or m.get('from')), {})
-    session_id = first_msg.get('meta_session_id', Path(path).name)
-    model = first_msg.get('model', 'unknown')
-    return extract_pairs_from_conversation(conversation, session_id, model, **kwargs)
+    if not path.exists():
+        print(f"Warning: {filepath} not found", file=sys.stderr)
+        return pairs
+
+    content = path.read_text()
+    lines = content.strip().split("\n")
+
+    for line in lines:
+        line = line.strip()
+        if not line:
+            continue
+        try:
+            session = json.loads(line)
+        except json.JSONDecodeError:
+            continue
+
+        session_pairs = extract_pairs_from_session(session, **kwargs)
+        pairs.extend(session_pairs)
+
+    return pairs
 
 
 def deduplicate_pairs(pairs: list) -> list:
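For orientation, the extract_pairs_from_session on the added side of this hunk consumes one JSON object per JSONL line, each carrying an id, a model, and a conversations list of {"from", "value"} messages. A minimal sketch with hypothetical field values (it assumes scripts/ is on sys.path, as the old tests arranged):

# Sketch only: every value below is hypothetical, for illustration.
from session_pair_harvester import extract_pairs_from_session

session = {
    "id": "session-001",
    "model": "example-model",
    "conversations": [
        {"from": "human", "value": "Summarize the harvester design"},
        {"from": "gpt", "value": (
            "The harvester walks each session, pairs every sufficiently long "
            "assistant reply with the human prompt that precedes it, applies "
            "ratio and quality filters, and deduplicates by content hash."
        )},
    ],
}
pairs = extract_pairs_from_session(session, min_ratio=1.0, min_response_words=5)
# Each pair records terse/rich text plus source, model, word counts, and ratio.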
scripts/test_dependency_freshness.py (new file, 179 lines)
@@ -0,0 +1,179 @@
#!/usr/bin/env python3
"""Tests for scripts/dependency_freshness.py — 9.7 Dependency Freshness."""

import json
import os
import sys
from unittest.mock import patch, MagicMock

# Import target module
sys.path.insert(0, os.path.dirname(__file__) or ".")
import importlib.util
spec = importlib.util.spec_from_file_location(
    "dependency_freshness",
    os.path.join(os.path.dirname(__file__) or ".", "dependency_freshness.py")
)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)

parse_requirements = mod.parse_requirements
get_major_version = mod.get_major_version
is_more_than_two_majors_behind = mod.is_more_than_two_majors_behind
analyze_dependencies = mod.analyze_dependencies


def test_parse_requirements_simple():
    """Parse a simple package line."""
    import tempfile
    with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f:
        f.write("requests\n")
        tmp = f.name
    try:
        pkgs = parse_requirements(tmp)
        assert pkgs == ["requests"], f"got {pkgs}"
        print("PASS: test_parse_requirements_simple")
    finally:
        os.unlink(tmp)


def test_parse_requirements_with_specifiers():
    """Parse lines with version specifiers."""
    import tempfile
    with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f:
        f.write("pytest>=8,<9\n")
        f.write("aiohttp>=3.8\n")
        tmp = f.name
    try:
        pkgs = parse_requirements(tmp)
        assert pkgs == ["pytest", "aiohttp"], f"got {pkgs}"
        print("PASS: test_parse_requirements_with_specifiers")
    finally:
        os.unlink(tmp)


def test_parse_requirements_ignores_comments_and_blanks():
    """Comments and blank lines are skipped."""
    import tempfile
    with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f:
        f.write("# This is a comment\n")
        f.write("\n")
        f.write(" \n")
        f.write("numpy\n")
        f.write("# another comment\n")
        tmp = f.name
    try:
        pkgs = parse_requirements(tmp)
        assert pkgs == ["numpy"], f"got {pkgs}"
        print("PASS: test_parse_requirements_ignores_comments_and_blanks")
    finally:
        os.unlink(tmp)


def test_get_major_version_normal():
    """Extract major version from typical semantic strings."""
    assert get_major_version("1.2.3") == 1
    assert get_major_version("3.4.5") == 3
    assert get_major_version("0.11.0") == 0
    print("PASS: test_get_major_version_normal")


def test_get_major_version_with_rc():
    """Prerelease versions still yield major number."""
    assert get_major_version("2.0.0rc1") == 2
    assert get_major_version("1.0.0a1") == 1
    print("PASS: test_get_major_version_with_rc")


def test_is_more_than_two_majors_behind():
    """Difference >2 triggers True; <=2 triggers False."""
    assert is_more_than_two_majors_behind("1.2.3", "4.0.0") is True
    assert is_more_than_two_majors_behind("3.9.0", "4.0.0") is False
    assert is_more_than_two_majors_behind("2.1.0", "5.2.0") is True
    assert is_more_than_two_majors_behind("8.0.0", "9.0.0") is False
    assert is_more_than_two_majors_behind("4.0.0", "4.0.0") is False
    print("PASS: test_is_more_than_two_majors_behind")


def test_analyze_dependencies_very_outdated():
    """Flag packages more than 2 major versions behind."""
    required = ["pkg_a", "pkg_b"]
    installed = {"pkg_a": "1.0.0", "pkg_b": "3.5.2"}
    outdated = {
        "pkg_a": {"installed": "1.0.0", "latest": "4.0.0"},
        "pkg_b": {"installed": "3.5.2", "latest": "4.0.0"},
    }
    very_out, missing, outdated_ok = analyze_dependencies(required, installed, outdated)
    assert len(very_out) == 1 and very_out[0]["package"] == "pkg_a"
    assert len(missing) == 0
    assert len(outdated_ok) == 1 and outdated_ok[0]["package"] == "pkg_b"
    print("PASS: test_analyze_dependencies_very_outdated")


def test_analyze_dependencies_missing():
    """Detect packages not installed at all."""
    required = ["pkg_a", "pkg_missing"]
    installed = {"pkg_a": "2.0.0"}
    outdated = {"pkg_a": {"installed": "2.0.0", "latest": "3.0.0"}}
    very_out, missing, outdated_ok = analyze_dependencies(required, installed, outdated)
    assert "pkg_missing" in missing
    assert len(very_out) == 0
    assert len(outdated_ok) == 1
    print("PASS: test_analyze_dependencies_missing")


def test_analyze_dependencies_up_to_date():
    """Packages up-to-date are not flagged."""
    required = ["pkg_good"]
    installed = {"pkg_good": "5.0.0"}
    outdated = {}
    very_out, missing, outdated_ok = analyze_dependencies(required, installed, outdated)
    assert len(very_out) == 0
    assert len(missing) == 0
    assert len(outdated_ok) == 0
    print("PASS: test_analyze_dependencies_up_to_date")


def test_generate_human_report_contains_very_outdated():
    """Human report includes very outdated packages."""
    very_out = [
        {"package": "oldpkg", "installed": "1.0", "latest": "4.0", "major_diff": 3}
    ]
    missing = []
    outdated_ok = []
    report = mod.generate_human_report(very_out, missing, outdated_ok, "requirements.txt")
    assert "oldpkg" in report
    assert "Installed: 1.0" in report
    assert "Latest: 4.0" in report
    assert "Major diff: 3" in report
    print("PASS: test_generate_human_report_contains_very_outdated")


def test_generate_json_report_structure():
    """JSON report contains required keys."""
    very_out = [{"package": "oldpkg", "installed": "1.0", "latest": "4.0", "major_diff": 3}]
    missing = ["missing_pkg"]
    outdated_ok = []
    report_json = mod.generate_json_report(very_out, missing, outdated_ok, "requirements.txt")
    data = json.loads(report_json)
    assert "summary" in data
    assert data["summary"]["very_outdated_count"] == 1
    assert data["summary"]["missing_count"] == 1
    assert "very_outdated" in data
    assert "missing" in data
    print("PASS: test_generate_json_report_structure")


if __name__ == '__main__':
    print("Running dependency_freshness test suite...")
    test_parse_requirements_simple()
    test_parse_requirements_with_specifiers()
    test_parse_requirements_ignores_comments_and_blanks()
    test_get_major_version_normal()
    test_get_major_version_with_rc()
    test_is_more_than_two_majors_behind()
    test_analyze_dependencies_very_outdated()
    test_analyze_dependencies_missing()
    test_analyze_dependencies_up_to_date()
    test_generate_human_report_contains_very_outdated()
    test_generate_json_report_structure()
    print("ALL TESTS PASSED.")
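The suite is a plain-assert script with a __main__ runner, so it can presumably be executed directly with python3 scripts/test_dependency_freshness.py; each test prints a PASS line and a successful run ends with "ALL TESTS PASSED."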
Deleted file (118 lines): conversation-based tests for session_pair_harvester
@@ -1,118 +0,0 @@
"""
Tests for session_pair_harvester — training pair extraction from sessions.
"""

import json
import tempfile
import unittest
from pathlib import Path

import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
from session_pair_harvester import (
    extract_pairs_from_conversation,
    extract_from_jsonl_file,
    deduplicate_pairs,
    compute_hash,
)


class TestSessionPairHarvester(unittest.TestCase):
    def test_compute_hash_consistent(self):
        h1 = compute_hash("hello world")
        h2 = compute_hash("hello world")
        self.assertEqual(h1, h2)
        self.assertEqual(len(h1), 16)

    def test_extract_simple_qa_pair(self):
        """A simple user→assistant exchange produces one pair."""
        conversation = [
            {"role": "user", "content": "What is the capital of France?"},
            {"role": "assistant", "content": "The capital of France is Paris. It is a major European city renowned for its art, fashion, gastronomy, cultural heritage, and historical significance. The city attracts millions of tourists annually."},
        ]
        pairs = extract_pairs_from_conversation(conversation, "test_session", "test-model")
        self.assertEqual(len(pairs), 1)
        self.assertEqual(pairs[0]["terse"], "What is the capital of France?")
        self.assertIn("Paris", pairs[0]["rich"])
        self.assertEqual(pairs[0]["source"], "test_session")

    def test_min_ratio_filter(self):
        """Very short responses are filtered out."""
        conversation = [
            {"role": "user", "content": "Yes"},
            {"role": "assistant", "content": "No."},
        ]
        # Default min_ratio = 1.5, min_words = 20 for response
        pairs = extract_pairs_from_conversation(conversation, "s", "m", min_response_words=3)
        self.assertEqual(len(pairs), 0)

    def test_min_words_filter(self):
        """Assistant responses below min word count are skipped."""
        conversation = [
            {"role": "user", "content": "Explain the project architecture in detail"},
            {"role": "assistant", "content": "OK."},
        ]
        pairs = extract_pairs_from_conversation(conversation, "s", "m", min_response_words=5)
        self.assertEqual(len(pairs), 0)

    def test_skip_non_assistant_messages(self):
        """System and tool messages are ignored."""
        conversation = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": "Hi there! How can I help you today?"},
        ]
        pairs = extract_pairs_from_conversation(conversation, "s", "m", min_response_words=3)
        self.assertEqual(len(pairs), 1)
        self.assertEqual(pairs[0]["terse"], "Hello")

    def test_multiple_pairs_from_one_session(self):
        """A conversation with several Q&A turns yields multiple pairs."""
        conversation = [
            {"role": "user", "content": "First question?"},
            {"role": "assistant", "content": "Here is a detailed and comprehensive answer that thoroughly explores multiple aspects of the subject. It provides background context and practical implications for the reader."},
            {"role": "user", "content": "Second?"},
            {"role": "assistant", "content": "Another comprehensive response with detailed examples. This includes practical code blocks and thorough explanations to ensure deep understanding of the topic at hand."},
        ]
        pairs = extract_pairs_from_conversation(conversation, "s", "m", min_ratio=1.0)
        self.assertEqual(len(pairs), 2)

    def test_deduplication_removes_duplicates(self):
        """Identical pairs across sessions are deduplicated."""
        pairs = [
            {"terse": "q1", "rich": "a1", "source": "s1", "model": "m"},
            {"terse": "q1", "rich": "a1", "source": "s2", "model": "m"},
            {"terse": "q2", "rich": "a2", "source": "s1", "model": "m"},
        ]
        unique = deduplicate_pairs(pairs)
        self.assertEqual(len(unique), 2)
        sources = {p["source"] for p in unique}
        # First unique pair can be from either s1 or s2
        self.assertIn("s1", sources)

    def test_integration_with_test_sessions(self):
        """Harvester finds pairs in real test session files."""
        repo_root = Path(__file__).parent.parent
        test_sessions_dir = repo_root / "test_sessions"
        if not test_sessions_dir.exists():
            self.skipTest("test_sessions not found")

        pairs = []
        for jsonl_file in sorted(test_sessions_dir.glob("*.jsonl")):
            pairs.extend(extract_from_jsonl_file(str(jsonl_file)))

        self.assertGreater(len(pairs), 0, "Should extract at least one pair from test_sessions")
        for p in pairs:
            self.assertIn("terse", p)
            self.assertIn("rich", p)
            self.assertIn("source", p)
            self.assertIn("model", p)
            # Verify content exists
            self.assertGreater(len(p["terse"]), 0)
            self.assertGreater(len(p["rich"]), 0)


if __name__ == "__main__":
    unittest.main()