Compare commits: step35/197...step35/137

1 commit: b823d4e308
@@ -43,26 +43,9 @@ The harvester writes to both. The bootstrapper reads from index.json. Humans edi
| `last_confirmed` | date | no | ISO-8601 date last seen in a session |
| `expires` | date | no | Optional. After this date, fact is stale |
| `related` | string[] | no | IDs of related facts |
| `provenance` | object | no | Provenance metadata — see Provenance Object section below |
### ID Format: `{domain}:{category}:{sequence}`

### Provenance Object

Every fact may include a [`provenance`](#fact-object) field that tracks its origin.

| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `source_session` | string | yes | Session ID / file path where this fact was extracted |
| `source_model` | string | yes | Model name used for extraction (e.g., `xiaomi/mimo-v2-pro`) |
| `source_provider` | string | yes | Provider name (`nous`, `openrouter`, `anthropic`, `openai`, etc.) |
| `timestamp` | date-time | yes | Extraction timestamp (ISO-8601 UTC) |
| `extraction_method` | enum | yes | `llm_extraction`, `manual`, or `retroactive_harvest` |
| `confidence` | float | yes | Confidence at extraction time (0.0–1.0) |
| `verified` | boolean | yes | `true` if fact has been manually reviewed, else `false` |

### Categories

| Category | Definition |
@@ -102,35 +85,6 @@ knowledge/
└── {agent-type}.yaml
```
### Provenance Object (added via `write_knowledge()` and harvester)
```json
{
  "source_session": "string — session ID or file path",
  "source_model": "string — model used for extraction",
  "source_provider": "string — provider name (nous, openrouter, etc.)",
  "timestamp": "string — ISO-8601 UTC extraction time",
  "extraction_method": "string — llm_extraction|manual|retroactive_harvest",
  "confidence": "float — 0.0–1.0 confidence from extraction",
  "verified": "boolean — whether fact has been manually verified"
}
```

The `provenance` field is attached to every fact harvested via `write_knowledge()`. It provides traceability: which session produced this fact, which model/provider extracted it, when, and with what confidence.

| Provenance Field | Type | Required | Description |
|------------------|------|----------|-------------|
| `source_session` | string | yes | Session ID / file path where extracted |
| `source_model` | string | yes | Model name (e.g., `xiaomi/mimo-v2-pro`) |
| `source_provider` | string | yes | Provider (`nous`, `openrouter`, `anthropic`, `openai`) |
| `timestamp` | date-time | yes | Extraction timestamp (ISO-8601) |
| `extraction_method` | enum | yes | `llm_extraction`, `manual`, or `retroactive_harvest` |
| `confidence` | float | yes | Confidence score (0.0–1.0) at extraction time |
| `verified` | boolean | yes | `true` if manually reviewed, else `false` |
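To make the table above concrete, here is a minimal sketch of a harvested fact carrying a provenance block. All values are illustrative, the `id` field name is an assumption (only its `{domain}:{category}:{sequence}` format is specified above), and other Fact Object fields are omitted:

```json
{
  "id": "infra:deployment:004",
  "confidence": 0.8,
  "last_confirmed": "2024-06-01",
  "related": ["infra:deployment:001"],
  "provenance": {
    "source_session": "sessions/2024-06-01-example.json",
    "source_model": "xiaomi/mimo-v2-pro",
    "source_provider": "nous",
    "timestamp": "2024-06-01T12:05:00+00:00",
    "extraction_method": "llm_extraction",
    "confidence": 0.8,
    "verified": false
  }
}
```

Note that `confidence` appears both on the fact and inside `provenance`; per the provenance schema shown later in this diff, the provenance copy records the value at extraction time.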
## YAML File Format

YAML files use frontmatter for metadata, then markdown sections with fact entries:
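As a purely hypothetical sketch of that layout (the actual frontmatter keys, section names, and entry syntax are defined elsewhere in the spec and may differ):

```markdown
---
# assumed frontmatter keys, for illustration only
domain: infra
updated: 2024-06-01
---

## Deployment

- infra:deployment:004 (confidence 0.8, last_confirmed 2024-06-01): example fact text goes here.
```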
@@ -1,52 +0,0 @@
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "title": "Knowledge Provenance",
  "description": "Provenance metadata attached to every knowledge fact",
  "type": "object",
  "required": [
    "source_session",
    "source_model",
    "source_provider",
    "timestamp"
  ],
  "properties": {
    "source_session": {
      "type": "string",
      "description": "Session ID or file path where this fact was extracted"
    },
    "source_model": {
      "type": "string",
      "description": "Model used for extraction (e.g., 'xiaomi/mimo-v2-pro')"
    },
    "source_provider": {
      "type": "string",
      "description": "Provider name (nous, openrouter, anthropic, etc.)"
    },
    "timestamp": {
      "type": "string",
      "format": "date-time",
      "description": "UTC ISO-8601 timestamp when this fact was extracted"
    },
    "extraction_method": {
      "type": "string",
      "description": "How the fact was extracted (llm_extraction, manual, retroactive_harvest)",
      "enum": [
        "llm_extraction",
        "manual",
        "retroactive_harvest"
      ],
      "default": "llm_extraction"
    },
    "confidence": {
      "type": "number",
      "minimum": 0,
      "maximum": 1,
      "description": "Confidence assigned during extraction (copied from top-level fact)"
    },
    "verified": {
      "type": "boolean",
      "description": "Whether this fact has been manually verified",
      "default": false
    }
  }
}
@@ -27,22 +27,6 @@ sys.path.insert(0, str(SCRIPT_DIR))
from session_reader import read_session, extract_conversation, truncate_for_context, messages_to_text


def extract_provider(api_base: str) -> str:
    """Infer provider name from API base URL."""
    url = api_base.lower()
    if 'nousresearch' in url or 'nous' in url:
        return 'nous'
    if 'openrouter' in url:
        return 'openrouter'
    if 'anthropic' in url:
        return 'anthropic'
    if 'openai' in url:
        return 'openai'
    # Fallback: try to extract hostname
    from urllib.parse import urlparse
    host = urlparse(api_base).netloc
    return host.split('.')[0] if host else 'unknown'


# --- Configuration ---

DEFAULT_API_BASE = os.environ.get("HARVESTER_API_BASE", "https://api.nousresearch.com/v1")
@@ -245,34 +229,15 @@ def validate_fact(fact: dict) -> bool:
    return True


def write_knowledge(index: dict, new_facts: list[dict], knowledge_dir: str, source_session: str = "", model: str = "", provider: str = ""):
    """Write new facts to the knowledge store.

    Adds provenance metadata to each fact. If model/provider are empty, tries to
    infer from environment or defaults.
    """
def write_knowledge(index: dict, new_facts: list[dict], knowledge_dir: str, source_session: str = ""):
    """Write new facts to the knowledge store."""
    kdir = Path(knowledge_dir)
    kdir.mkdir(parents=True, exist_ok=True)

    # Determine model/provider defaults if not provided
    model = model or os.environ.get("HARVESTER_MODEL", "xiaomi/mimo-v2-pro")
    provider = provider or os.environ.get("HARVESTER_PROVIDER", "nous")

    timestamp = datetime.now(timezone.utc).isoformat()

    # Add provenance to each fact
    # Add source tracking to each fact
    for fact in new_facts:
        provenance = {
            'source_session': source_session,
            'source_model': model,
            'source_provider': provider,
            'timestamp': timestamp,
            'extraction_method': 'llm_extraction',
            'confidence': fact.get('confidence', 0.5),
            'verified': False
        }
        fact['provenance'] = provenance
        fact['harvested_at'] = timestamp
        fact['source_session'] = source_session
        fact['harvested_at'] = datetime.now(timezone.utc).isoformat()

    # Update index
    index['facts'].extend(new_facts)
@@ -365,7 +330,7 @@ def harvest_session(session_path: str, knowledge_dir: str, api_base: str, api_ke
    # 8. Write (unless dry run)
    if new_facts and not dry_run:
        write_knowledge(existing_index, new_facts, knowledge_dir, source_session=session_path, model=model, provider=extract_provider(api_base))
        write_knowledge(existing_index, new_facts, knowledge_dir, source_session=session_path)

    stats['elapsed_seconds'] = round(time.time() - start_time, 2)
    return stats
scripts/release_note_analyzer.py (new executable file, 203 lines)
@@ -0,0 +1,203 @@
#!/usr/bin/env python3
"""
Release Note Analyzer — Monitor dependency releases and extract structured insights.

Fetches GitHub releases for configured repositories, parses changelogs,
categorizes changes, and flags breaking changes.

Usage:
    python3 scripts/release_note_analyzer.py --repos owner/repo1,owner/repo2
    python3 scripts/release_note_analyzer.py --repos numpy/numpy --limit 5
    python3 scripts/release_note_analyzer.py --repos owner/repo --output metrics/releases.json
    python3 scripts/release_note_analyzer.py --repos owner/repo --token $GITHUB_TOKEN

Output:
    JSON with per-release structure: version, date, url, categories (features, fixes, other), breaking_change_flags, raw_body
"""

import argparse
import json
import re
import sys
from datetime import datetime, timezone
from typing import Dict, List, Any, Optional
from dataclasses import dataclass, field, asdict
import os
@dataclass
class ReleaseAnalysis:
    version: str
    date: str
    url: str
    categories: Dict[str, List[str]] = field(default_factory=dict)
    breaking_change_flags: List[str] = field(default_factory=list)
    raw_body: str = ""

    def to_dict(self) -> Dict[str, Any]:
        return asdict(self)
def fetch_github_releases(repo: str, token: Optional[str] = None, limit: int = 10) -> List[Dict[str, Any]]:
    """Fetch latest releases from GitHub API."""
    import urllib.request
    import urllib.error

    url = f"https://api.github.com/repos/{repo}/releases?per_page={limit}"
    headers = {"Accept": "application/vnd.github.v3+json"}
    if token:
        headers["Authorization"] = f"token {token}"

    req = urllib.request.Request(url, headers=headers)
    try:
        with urllib.request.urlopen(req, timeout=30) as resp:
            data = json.loads(resp.read())
            return data
    except urllib.error.HTTPError as e:
        print(f"Error fetching releases for {repo}: HTTP {e.code}", file=sys.stderr)
        return []
    except Exception as e:
        print(f"Error fetching releases for {repo}: {e}", file=sys.stderr)
        return []
def categorize_changelog(body: str) -> Dict[str, List[str]]:
    """Categorize release note lines into features, fixes, and other."""
    categories = {
        "features": [],
        "fixes": [],
        "other": []
    }

    if not body:
        return categories

    lines = body.split('\n')
    current_section = None

    # Section header patterns
    feature_patterns = re.compile(r'^(?:features?|new|add|enhancement)s?', re.IGNORECASE)
    fix_patterns = re.compile(r'^(?:fix(?:es|ed)?|bug|patch|correction)', re.IGNORECASE)

    for line in lines:
        stripped = line.strip()
        if not stripped:
            continue

        # Check for section headers (e.g., "### Features", "## Added")
        header_match = re.match(r'^#{1,3}\s+(.+)$', stripped)
        if header_match:
            header = header_match.group(1).lower()
            if feature_patterns.search(header):
                current_section = "features"
            elif fix_patterns.search(header):
                current_section = "fixes"
            else:
                current_section = None
            continue

        # Categorize based on line content
        if current_section:
            categories[current_section].append(stripped)
        else:
            # Infer from keywords
            if re.search(r'^(?:added|new|feature|introdu)', stripped, re.IGNORECASE):
                categories["features"].append(stripped)
            elif re.search(r'^(?:fix|bug|patch|resolved)', stripped, re.IGNORECASE):
                categories["fixes"].append(stripped)
            else:
                categories["other"].append(stripped)

    # Deduplicate within categories
    for cat in categories:
        categories[cat] = list(dict.fromkeys(categories[cat]))

    return categories
def detect_breaking_changes(body: str) -> List[str]:
    """Detect and extract potential breaking change indicators."""
    breaking_indicators = []
    lines = body.split('\n')

    # Keywords that suggest breaking changes
    breaking_keywords = re.compile(
        r'\b(?:BREAKING|breaking\s+change|backward\s+incompatible|'
        r'removed\s+.*?API|deprecated.*?removed|'
        r'major\s+version|'
        r'not\s+backward\s+compatible)\b',
        re.IGNORECASE
    )

    for line in lines:
        if breaking_keywords.search(line):
            breaking_indicators.append(line.strip())

    return breaking_indicators
def analyze_releases(repos: List[str], token: Optional[str] = None, limit: int = 10) -> List[Dict[str, Any]]:
    """Fetch and analyze releases for all configured repos."""
    all_releases = []

    for repo in repos:
        repo = repo.strip()
        if not repo:
            continue

        releases = fetch_github_releases(repo, token=token, limit=limit)
        for release_data in releases:
            body = release_data.get('body') or ""
            tag = release_data.get('tag_name', 'unknown')
            date = release_data.get('published_at', '')
            url = release_data.get('html_url', '')

            analysis = ReleaseAnalysis(
                version=tag,
                date=date,
                url=url,
                raw_body=body[:5000]  # Truncate for output size
            )

            # Categorize changes
            analysis.categories = categorize_changelog(body)

            # Detect breaking changes
            analysis.breaking_change_flags = detect_breaking_changes(body)

            all_releases.append(analysis.to_dict())

    return all_releases
def main():
    parser = argparse.ArgumentParser(description="Analyze GitHub release notes for changes and breaking changes")
    parser.add_argument('--repos', required=True, help='Comma-separated list of GitHub repos (owner/repo)')
    parser.add_argument('--token', help='GitHub API token (or set GITHUB_TOKEN env var)')
    parser.add_argument('--limit', type=int, default=10, help='Max releases per repo (default: 10)')
    parser.add_argument('--output', help='Write JSON output to file (default: stdout)')

    args = parser.parse_args()

    repos = [r.strip() for r in args.repos.split(',')]
    token = args.token or os.environ.get('GITHUB_TOKEN')

    results = analyze_releases(repos, token=token, limit=args.limit)

    output = {
        "generated_at": datetime.now(timezone.utc).isoformat(),
        "repos": repos,
        "release_count": len(results),
        "releases": results
    }

    if args.output:
        with open(args.output, 'w') as f:
            json.dump(output, f, indent=2)
        print(f"Wrote {len(results)} releases to {args.output}")
    else:
        print(json.dumps(output, indent=2))


if __name__ == '__main__':
    main()
tests/test_release_note_analyzer.py (new file, 107 lines)
@@ -0,0 +1,107 @@
#!/usr/bin/env python3
"""Tests for scripts/release_note_analyzer.py"""

import json
import os
import sys
import tempfile

sys.path.insert(0, os.path.join(os.path.dirname(__file__) or ".", ".."))
import importlib.util
spec = importlib.util.spec_from_file_location(
    "release_note_analyzer",
    os.path.join(os.path.dirname(__file__) or ".", "..", "scripts", "release_note_analyzer.py")
)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)

categorize_changelog = mod.categorize_changelog
detect_breaking_changes = mod.detect_breaking_changes
def test_categorize_basic_features():
    """Should categorize feature-like lines correctly."""
    body = """
### Features
- Added new API endpoint
- Introduced batch processing

### Bug Fixes
- Fixed memory leak
"""
    categories = categorize_changelog(body)
    assert len(categories["features"]) >= 1, f"Got features: {categories['features']}"
    assert any("batch" in line or "API" in line for line in categories["features"])
    assert any("memory leak" in line for line in categories["fixes"])
    print("PASS: test_categorize_basic_features")


def test_categorize_fixes():
    """Should categorize bug fix lines correctly."""
    body = """
## Fixed
- Resolved crash on startup
- Patched security vulnerability

## Changed
- Updated documentation
"""
    categories = categorize_changelog(body)
    assert any("crash" in line for line in categories["fixes"]), f"Got fixes: {categories['fixes']}"
    assert any("security" in line for line in categories["fixes"]), f"Got fixes: {categories['fixes']}"
    print("PASS: test_categorize_fixes")


def test_categorize_other():
    """Uncategorized lines should go to 'other'."""
    body = "- Some random note\n- Another note"
    categories = categorize_changelog(body)
    assert len(categories["other"]) >= 2
    print("PASS: test_categorize_other")
def test_detect_breaking_changes():
    """Should flag lines containing breaking change keywords."""
    body = """
## Features
- Added new feature

## Breaking Changes
- Removed deprecated API endpoint
This is a BREAKING CHANGE: you must update your clients.

We also removed support for Python 3.8.
"""
    flags = detect_breaking_changes(body)
    assert len(flags) >= 2, f"Expected >=2 breaking flags, got {len(flags)}: {flags}"
    assert any("deprecated API" in f for f in flags), f"Missing: {flags}"
    assert any("BREAKING CHANGE" in f for f in flags), f"Missing: {flags}"
    print("PASS: test_detect_breaking_changes")


def test_detect_breaking_changes_case_insensitive():
    """Breaking change detection should be case-insensitive."""
    body = "This is a breaking change: old behavior removed"
    flags = detect_breaking_changes(body)
    assert len(flags) >= 1
    print("PASS: test_detect_breaking_changes_case_insensitive")


def test_empty_body():
    """Empty body should produce empty categories and no breaking flags."""
    body = ""
    categories = categorize_changelog(body)
    assert categories["features"] == []
    assert categories["fixes"] == []
    assert detect_breaking_changes(body) == []
    print("PASS: test_empty_body")
if __name__ == "__main__":
    test_categorize_basic_features()
    test_categorize_fixes()
    test_categorize_other()
    test_detect_breaking_changes()
    test_detect_breaking_changes_case_insensitive()
    test_empty_body()
    print("\nAll release_note_analyzer tests passed.")