Compare commits
1 commit
step35/137...step35/91

| Author | SHA1 | Date |
|---|---|---|
|  | b1a728f5f4 |  |
scripts/release_note_analyzer.py (deleted file)
@@ -1,203 +0,0 @@
#!/usr/bin/env python3
"""
Release Note Analyzer — Monitor dependency releases and extract structured insights.

Fetches GitHub releases for configured repositories, parses changelogs,
categorizes changes, and flags breaking changes.

Usage:
    python3 scripts/release_note_analyzer.py --repos owner/repo1,owner/repo2
    python3 scripts/release_note_analyzer.py --repos numpy/numpy --limit 5
    python3 scripts/release_note_analyzer.py --repos owner/repo --output metrics/releases.json
    python3 scripts/release_note_analyzer.py --repos owner/repo --token $GITHUB_TOKEN

Output:
    JSON with per-release structure: version, date, url,
    categories (features, fixes, other), breaking_change_flags, raw_body
"""

import argparse
import json
import os
import re
import sys
from dataclasses import asdict, dataclass, field
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional


@dataclass
class ReleaseAnalysis:
    version: str
    date: str
    url: str
    categories: Dict[str, List[str]] = field(default_factory=dict)
    breaking_change_flags: List[str] = field(default_factory=list)
    raw_body: str = ""

    def to_dict(self) -> Dict[str, Any]:
        return asdict(self)


def fetch_github_releases(repo: str, token: Optional[str] = None, limit: int = 10) -> List[Dict[str, Any]]:
    """Fetch the latest releases from the GitHub API."""
    import urllib.error
    import urllib.request

    url = f"https://api.github.com/repos/{repo}/releases?per_page={limit}"
    headers = {"Accept": "application/vnd.github.v3+json"}
    if token:
        headers["Authorization"] = f"token {token}"

    req = urllib.request.Request(url, headers=headers)
    try:
        with urllib.request.urlopen(req, timeout=30) as resp:
            data = json.loads(resp.read())
            return data
    except urllib.error.HTTPError as e:
        print(f"Error fetching releases for {repo}: HTTP {e.code}", file=sys.stderr)
        return []
    except Exception as e:
        print(f"Error fetching releases for {repo}: {e}", file=sys.stderr)
        return []


def categorize_changelog(body: str) -> Dict[str, List[str]]:
    """Categorize release note lines into features, fixes, and other."""
    categories = {
        "features": [],
        "fixes": [],
        "other": []
    }

    if not body:
        return categories

    lines = body.split('\n')
    current_section = None

    # Section header patterns
    feature_patterns = re.compile(r'^(?:features?|new|add|enhancement)s?', re.IGNORECASE)
    fix_patterns = re.compile(r'^(?:fix(?:es|ed)?|bug|patch|correction)', re.IGNORECASE)

    for line in lines:
        stripped = line.strip()
        if not stripped:
            continue

        # Check for section headers (e.g., "### Features", "## Added")
        header_match = re.match(r'^#{1,3}\s+(.+)$', stripped)
        if header_match:
            header = header_match.group(1).lower()
            if feature_patterns.search(header):
                current_section = "features"
            elif fix_patterns.search(header):
                current_section = "fixes"
            else:
                current_section = None
            continue

        # Categorize based on line content
        if current_section:
            categories[current_section].append(stripped)
        else:
            # Infer from keywords
            if re.search(r'^(?:added|new|feature|introdu)', stripped, re.IGNORECASE):
                categories["features"].append(stripped)
            elif re.search(r'^(?:fix|bug|patch|resolved)', stripped, re.IGNORECASE):
                categories["fixes"].append(stripped)
            else:
                categories["other"].append(stripped)

    # Deduplicate within categories
    for cat in categories:
        categories[cat] = list(dict.fromkeys(categories[cat]))

    return categories


def detect_breaking_changes(body: str) -> List[str]:
    """Detect and extract potential breaking change indicators."""
    breaking_indicators = []
    lines = body.split('\n')

    # Keywords that suggest breaking changes
    breaking_keywords = re.compile(
        r'\b(?:BREAKING|breaking\s+change|backward\s+incompatible|'
        r'removed\s+.*?API|deprecated.*?removed|'
        r'major\s+version|'
        r'not\s+backward\s+compatible)\b',
        re.IGNORECASE
    )

    for line in lines:
        if breaking_keywords.search(line):
            breaking_indicators.append(line.strip())

    return breaking_indicators


def analyze_releases(repos: List[str], token: Optional[str] = None, limit: int = 10) -> List[Dict[str, Any]]:
    """Fetch and analyze releases for all configured repos."""
    all_releases = []

    for repo in repos:
        repo = repo.strip()
        if not repo:
            continue

        releases = fetch_github_releases(repo, token=token, limit=limit)
        for release_data in releases:
            body = release_data.get('body') or ""
            tag = release_data.get('tag_name', 'unknown')
            date = release_data.get('published_at', '')
            url = release_data.get('html_url', '')

            analysis = ReleaseAnalysis(
                version=tag,
                date=date,
                url=url,
                raw_body=body[:5000]  # Truncate for output size
            )

            # Categorize changes
            analysis.categories = categorize_changelog(body)

            # Detect breaking changes
            analysis.breaking_change_flags = detect_breaking_changes(body)

            all_releases.append(analysis.to_dict())

    return all_releases


def main():
    parser = argparse.ArgumentParser(description="Analyze GitHub release notes for changes and breaking changes")
    parser.add_argument('--repos', required=True, help='Comma-separated list of GitHub repos (owner/repo)')
    parser.add_argument('--token', help='GitHub API token (or set GITHUB_TOKEN env var)')
    parser.add_argument('--limit', type=int, default=10, help='Max releases per repo (default: 10)')
    parser.add_argument('--output', help='Write JSON output to file (default: stdout)')

    args = parser.parse_args()

    repos = [r.strip() for r in args.repos.split(',')]
    token = args.token or os.environ.get('GITHUB_TOKEN')

    results = analyze_releases(repos, token=token, limit=args.limit)

    output = {
        "generated_at": datetime.now(timezone.utc).isoformat(),
        "repos": repos,
        "release_count": len(results),
        "releases": results
    }

    if args.output:
        with open(args.output, 'w') as f:
            json.dump(output, f, indent=2)
        print(f"Wrote {len(results)} releases to {args.output}")
    else:
        print(json.dumps(output, indent=2))


if __name__ == '__main__':
    main()
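For orientation before the deletion: the two pure helpers above can be exercised directly. A minimal sketch, assuming `categorize_changelog` and `detect_breaking_changes` from the (now-deleted) module are in scope; the expected values follow from the code above and mirror the assertions in the test module removed below:

```python
# Illustrative only; not real script output.
body = """\
### Features
- Added new API endpoint

### Bug Fixes
- Fixed memory leak
This is a BREAKING CHANGE: you must update your clients.
"""

cats = categorize_changelog(body)
# Section headers drive the buckets:
#   cats["features"] == ["- Added new API endpoint"]
#   cats["fixes"]    == ["- Fixed memory leak",
#                        "This is a BREAKING CHANGE: you must update your clients."]
# Section state persists until the next header, so the BREAKING sentence
# lands in "fixes" even though it is not a bullet.

flags = detect_breaking_changes(body)
# The keyword regex matches "BREAKING":
#   flags == ["This is a BREAKING CHANGE: you must update your clients."]
```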
scripts/session_pair_harvester.py
@@ -22,114 +22,95 @@ import sys
 from pathlib import Path
 from typing import Optional
 
 from session_reader import extract_conversation, read_session
 
 
 def compute_hash(text: str) -> str:
     """Content hash for deduplication."""
     return hashlib.sha256(text.encode()).hexdigest()[:16]
 
 
-def extract_pairs_from_session(session_data: dict, min_ratio: float = 1.5,
-                               min_response_words: int = 20) -> list:
-    """Extract terse→rich pairs from a single session object."""
+def extract_pairs_from_conversation(conversation: list, session_id: str, model: str,
+                                    min_ratio: float = 1.5,
+                                    min_response_words: int = 20) -> list:
+    """Extract terse→rich pairs from a normalized conversation."""
     pairs = []
-    conversations = session_data.get("conversations", [])
-    session_id = session_data.get("id", "unknown")
-    model = session_data.get("model", "unknown")
 
     seen_hashes = set()
 
-    for i, msg in enumerate(conversations):
-        # Look for assistant/gpt responses
-        if msg.get("from") not in ("gpt", "assistant"):
+    for i, msg in enumerate(conversation):
+        # Look for assistant responses
+        if msg.get('role') != 'assistant':
             continue
 
-        response_text = msg.get("value", "")
+        response_text = msg.get('content', '')
         if not response_text or len(response_text.split()) < min_response_words:
             continue
 
-        # Find the preceding human message
+        # Find the preceding user message
         prompt_text = ""
         for j in range(i - 1, -1, -1):
-            if conversations[j].get("from") == "human":
-                prompt_text = conversations[j].get("value", "")
+            if conversation[j].get('role') == 'user':
+                prompt_text = conversation[j].get('content', '')
                 break
 
         if not prompt_text:
             continue
 
         # Filter: skip tool results, system messages embedded as human
-        if prompt_text.startswith("{") and "output" in prompt_text[:100]:
-            continue  # likely a tool result
-        if prompt_text.startswith("# SOUL.md") or prompt_text.startswith("You are"):
-            continue  # system prompt leak
+        if prompt_text.startswith('{') and 'output' in prompt_text[:100]:
+            continue  # likely a tool result
+        if prompt_text.startswith('# SOUL.md') or prompt_text.startswith('You are'):
+            continue  # system prompt leak
 
         # Quality filters
         prompt_words = len(prompt_text.split())
         response_words = len(response_text.split())
 
         # Must have meaningful length ratio
         if prompt_words == 0 or response_words == 0:
             continue
         ratio = response_words / prompt_words
         if ratio < min_ratio:
             continue
 
         # Skip responses that are mostly code
-        code_blocks = response_text.count("```")
-        if code_blocks >= 4 and len(response_text.replace("```", "").strip()) < 50:
+        code_blocks = response_text.count('```')
+        if code_blocks >= 4 and len(response_text.replace('```', '').strip()) < 50:
             continue
 
         # Skip responses with tool call artifacts
-        if "tool_call" in response_text[:100] or "function_call" in response_text[:100]:
+        if 'tool_call' in response_text[:100] or 'function_call' in response_text[:100]:
             continue
 
         # Deduplicate by content hash
         content_hash = compute_hash(prompt_text + response_text[:200])
         if content_hash in seen_hashes:
             continue
         seen_hashes.add(content_hash)
 
         # Clean up response: remove markdown headers if too many
         clean_response = response_text
 
         pairs.append({
-            "terse": prompt_text.strip(),
-            "rich": clean_response.strip(),
-            "source": session_id,
-            "model": model,
-            "prompt_words": prompt_words,
-            "response_words": response_words,
-            "ratio": round(ratio, 2),
+            'terse': prompt_text.strip(),
+            'rich': clean_response.strip(),
+            'source': session_id,
+            'model': model,
+            'prompt_words': prompt_words,
+            'response_words': response_words,
+            'ratio': round(ratio, 2),
         })
 
     return pairs
 
 
-def extract_from_jsonl_file(filepath: str, **kwargs) -> list:
-    """Extract pairs from a session JSONL file."""
-    pairs = []
-    path = Path(filepath)
-
-    if not path.exists():
-        print(f"Warning: {filepath} not found", file=sys.stderr)
-        return pairs
-
-    content = path.read_text()
-    lines = content.strip().split("\n")
-
-    for line in lines:
-        line = line.strip()
-        if not line:
-            continue
-        try:
-            session = json.loads(line)
-        except json.JSONDecodeError:
-            continue
-
-        session_pairs = extract_pairs_from_session(session, **kwargs)
-        pairs.extend(session_pairs)
-
-    return pairs
+def extract_from_jsonl_file(path: str, **kwargs) -> list:
+    """Read a session file and extract training pairs using normalized conversation."""
+    session_messages = read_session(path)
+    if not session_messages:
+        return []
+    conversation = extract_conversation(session_messages)
+    # Derive session_id and model from first real message metadata
+    first_msg = next((m for m in session_messages if m.get('role') or m.get('from')), {})
+    session_id = first_msg.get('meta_session_id', Path(path).name)
+    model = first_msg.get('model', 'unknown')
+    return extract_pairs_from_conversation(conversation, session_id, model, **kwargs)
 
 
 def deduplicate_pairs(pairs: list) -> list:
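Two notes on the API change above. First, the length-ratio gate is unchanged: at the default `min_ratio` of 1.5, a 10-word prompt needs at least a 15-word response to survive. Second, call sites must migrate from the one-dict form to the explicit-metadata form. A hedged before/after sketch (actual call sites are not shown in this hunk; `path`, `session_id`, and `model` here are placeholders):

```python
# Before: messages and metadata travelled inside one session dict.
pairs = extract_pairs_from_session(session_data, min_ratio=1.5)

# After: messages are normalized first; identity is passed explicitly.
conversation = extract_conversation(read_session(path))
pairs = extract_pairs_from_conversation(conversation, session_id, model, min_ratio=1.5)
```

The new `extract_from_jsonl_file` wraps exactly this sequence, deriving `session_id` and `model` from the first message's metadata.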
Test module for scripts/release_note_analyzer.py (deleted file; path not shown in this view)
@@ -1,107 +0,0 @@
#!/usr/bin/env python3
"""Tests for scripts/release_note_analyzer.py"""

import json
import os
import sys
import tempfile

sys.path.insert(0, os.path.join(os.path.dirname(__file__) or ".", ".."))
import importlib.util

spec = importlib.util.spec_from_file_location(
    "release_note_analyzer",
    os.path.join(os.path.dirname(__file__) or ".", "..", "scripts", "release_note_analyzer.py")
)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)

categorize_changelog = mod.categorize_changelog
detect_breaking_changes = mod.detect_breaking_changes


def test_categorize_basic_features():
    """Should categorize feature-like lines correctly."""
    body = """
### Features
- Added new API endpoint
- Introduced batch processing

### Bug Fixes
- Fixed memory leak
"""
    categories = categorize_changelog(body)
    assert len(categories["features"]) >= 1, f"Got features: {categories['features']}"
    assert any("batch" in line or "API" in line for line in categories["features"])
    assert any("memory leak" in line for line in categories["fixes"])
    print("PASS: test_categorize_basic_features")


def test_categorize_fixes():
    """Should categorize bug fix lines correctly."""
    body = """
## Fixed
- Resolved crash on startup
- Patched security vulnerability

## Changed
- Updated documentation
"""
    categories = categorize_changelog(body)
    assert any("crash" in line for line in categories["fixes"]), f"Got fixes: {categories['fixes']}"
    assert any("security" in line for line in categories["fixes"]), f"Got fixes: {categories['fixes']}"
    print("PASS: test_categorize_fixes")


def test_categorize_other():
    """Uncategorized lines should go to 'other'."""
    body = "- Some random note\n- Another note"
    categories = categorize_changelog(body)
    assert len(categories["other"]) >= 2
    print("PASS: test_categorize_other")


def test_detect_breaking_changes():
    """Should flag lines containing breaking change keywords."""
    body = """
## Features
- Added new feature

## Breaking Changes
- Removed deprecated API endpoint
This is a BREAKING CHANGE: you must update your clients.

We also removed support for Python 3.8.
"""
    flags = detect_breaking_changes(body)
    assert len(flags) >= 2, f"Expected >=2 breaking flags, got {len(flags)}: {flags}"
    assert any("deprecated API" in f for f in flags), f"Missing: {flags}"
    assert any("BREAKING CHANGE" in f for f in flags), f"Missing: {flags}"
    print("PASS: test_detect_breaking_changes")


def test_detect_breaking_changes_case_insensitive():
    """Breaking change detection should be case-insensitive."""
    body = "This is a breaking change: old behavior removed"
    flags = detect_breaking_changes(body)
    assert len(flags) >= 1
    print("PASS: test_detect_breaking_changes_case_insensitive")


def test_empty_body():
    """Empty body should produce empty categories and no breaking flags."""
    body = ""
    categories = categorize_changelog(body)
    assert categories["features"] == []
    assert categories["fixes"] == []
    assert detect_breaking_changes(body) == []
    print("PASS: test_empty_body")


if __name__ == "__main__":
    test_categorize_basic_features()
    test_categorize_fixes()
    test_categorize_other()
    test_detect_breaking_changes()
    test_detect_breaking_changes_case_insensitive()
    test_empty_body()
    print("\nAll release_note_analyzer tests passed.")
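The deleted suite loaded the script by file path rather than via `sys.path`, which keeps `scripts/` importable without packaging it. A generic sketch of that load-by-path pattern (the helper name is mine, not from the repo):

```python
import importlib.util
from pathlib import Path

def load_module_from_path(name: str, path: Path):
    """Load a Python source file as a module without touching sys.path."""
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)  # executes the file in the new module's namespace
    return module

# mod = load_module_from_path("release_note_analyzer",
#                             Path("scripts/release_note_analyzer.py"))
```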
tests/test_session_pair_harvester.py (new file, 118 lines)
@@ -0,0 +1,118 @@
"""
Tests for session_pair_harvester — training pair extraction from sessions.
"""

import json
import sys
import tempfile
import unittest
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
from session_pair_harvester import (
    extract_pairs_from_conversation,
    extract_from_jsonl_file,
    deduplicate_pairs,
    compute_hash,
)


class TestSessionPairHarvester(unittest.TestCase):
    def test_compute_hash_consistent(self):
        h1 = compute_hash("hello world")
        h2 = compute_hash("hello world")
        self.assertEqual(h1, h2)
        self.assertEqual(len(h1), 16)

    def test_extract_simple_qa_pair(self):
        """A simple user→assistant exchange produces one pair."""
        conversation = [
            {"role": "user", "content": "What is the capital of France?"},
            {"role": "assistant", "content": "The capital of France is Paris. It is a major European city renowned for its art, fashion, gastronomy, cultural heritage, and historical significance. The city attracts millions of tourists annually."},
        ]
        pairs = extract_pairs_from_conversation(conversation, "test_session", "test-model")
        self.assertEqual(len(pairs), 1)
        self.assertEqual(pairs[0]["terse"], "What is the capital of France?")
        self.assertIn("Paris", pairs[0]["rich"])
        self.assertEqual(pairs[0]["source"], "test_session")

    def test_min_ratio_filter(self):
        """Very short responses are filtered out."""
        conversation = [
            {"role": "user", "content": "Yes"},
            {"role": "assistant", "content": "No."},
        ]
        # Defaults: min_ratio = 1.5, min_response_words = 20
        pairs = extract_pairs_from_conversation(conversation, "s", "m", min_response_words=3)
        self.assertEqual(len(pairs), 0)

    def test_min_words_filter(self):
        """Assistant responses below the minimum word count are skipped."""
        conversation = [
            {"role": "user", "content": "Explain the project architecture in detail"},
            {"role": "assistant", "content": "OK."},
        ]
        pairs = extract_pairs_from_conversation(conversation, "s", "m", min_response_words=5)
        self.assertEqual(len(pairs), 0)

    def test_skip_non_assistant_messages(self):
        """System and tool messages are ignored."""
        conversation = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": "Hi there! How can I help you today?"},
        ]
        pairs = extract_pairs_from_conversation(conversation, "s", "m", min_response_words=3)
        self.assertEqual(len(pairs), 1)
        self.assertEqual(pairs[0]["terse"], "Hello")

    def test_multiple_pairs_from_one_session(self):
        """A conversation with several Q&A turns yields multiple pairs."""
        conversation = [
            {"role": "user", "content": "First question?"},
            {"role": "assistant", "content": "Here is a detailed and comprehensive answer that thoroughly explores multiple aspects of the subject. It provides background context and practical implications for the reader."},
            {"role": "user", "content": "Second?"},
            {"role": "assistant", "content": "Another comprehensive response with detailed examples. This includes practical code blocks and thorough explanations to ensure deep understanding of the topic at hand."},
        ]
        pairs = extract_pairs_from_conversation(conversation, "s", "m", min_ratio=1.0)
        self.assertEqual(len(pairs), 2)

    def test_deduplication_removes_duplicates(self):
        """Identical pairs across sessions are deduplicated."""
        pairs = [
            {"terse": "q1", "rich": "a1", "source": "s1", "model": "m"},
            {"terse": "q1", "rich": "a1", "source": "s2", "model": "m"},
            {"terse": "q2", "rich": "a2", "source": "s1", "model": "m"},
        ]
        unique = deduplicate_pairs(pairs)
        self.assertEqual(len(unique), 2)
        sources = {p["source"] for p in unique}
        # The first unique pair can come from either s1 or s2
        self.assertIn("s1", sources)

    def test_integration_with_test_sessions(self):
        """Harvester finds pairs in real test session files."""
        repo_root = Path(__file__).parent.parent
        test_sessions_dir = repo_root / "test_sessions"
        if not test_sessions_dir.exists():
            self.skipTest("test_sessions not found")

        pairs = []
        for jsonl_file in sorted(test_sessions_dir.glob("*.jsonl")):
            pairs.extend(extract_from_jsonl_file(str(jsonl_file)))

        self.assertGreater(len(pairs), 0, "Should extract at least one pair from test_sessions")
        for p in pairs:
            self.assertIn("terse", p)
            self.assertIn("rich", p)
            self.assertIn("source", p)
            self.assertIn("model", p)
            # Verify content exists
            self.assertGreater(len(p["terse"]), 0)
            self.assertGreater(len(p["rich"]), 0)


if __name__ == "__main__":
    unittest.main()
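The harvester hunk above truncates before the body of `deduplicate_pairs`, but `test_deduplication_removes_duplicates` pins its behavior: identical (terse, rich) pairs collapse to the first occurrence. A minimal sketch consistent with those tests and with `compute_hash` (hypothetical; the real body is not shown in this diff):

```python
def deduplicate_pairs(pairs: list) -> list:
    """Keep the first occurrence of each (terse, rich) pair. Hypothetical sketch."""
    seen = set()
    unique = []
    for p in pairs:
        h = compute_hash(p["terse"] + p["rich"][:200])  # same keying as in-session dedup
        if h in seen:
            continue
        seen.add(h)
        unique.append(p)
    return unique
```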