Compare commits
1 Commit
feat/177-i...feat/sessi
| Author | SHA1 | Date |
|---|---|---|
|  | 160dfcf419 |  |
scripts/gitea_issue_parser.py
@@ -1,131 +0,0 @@
#!/usr/bin/env python3
"""
Gitea Issue Body Parser — Extract structured data from markdown issue bodies.

Usage:
    cat issue_body.txt | python3 scripts/gitea_issue_parser.py --stdin --pretty
    python3 scripts/gitea_issue_parser.py --url https://forge.../api/v1/repos/.../issues/123 --pretty
    python3 scripts/gitea_issue_parser.py body.txt --title "Fix thing (#42)" --labels pipeline extraction
"""

import argparse
import json
import re
import sys
from typing import Dict, List, Any, Optional


def parse_issue_body(body: str, title: str = "", labels: Optional[List[str]] = None) -> Dict[str, Any]:
    """Parse a Gitea issue markdown body into structured JSON.

    Extracted fields:
    - title: Issue title
    - context: Background/description section
    - criteria[]: Acceptance criteria (checkboxes or numbered lists)
    - labels[]: Issue labels
    - epic_ref: Parent/epic issue reference (from "Closes #N" or title)
    - sections{}: All ## sections as key-value pairs
    """
    result = {
        "title": title,
        "context": "",
        "criteria": [],
        "labels": labels or [],
        "epic_ref": None,
        "sections": {},
    }

    if not body:
        return result

    # Extract epic reference from title or body
    epic_patterns = [
        r"(?:closes|fixes|addresses|refs?)\s+#(\d+)",
        r"#(\d+)",
    ]
    for pattern in epic_patterns:
        match = re.search(pattern, (title + " " + body).lower())
        if match:
            result["epic_ref"] = int(match.group(1))
            break

    # Parse ## sections
    section_pattern = r"^##\s+(.+?)$\n((?:^(?!##\s).*$\n?)*)"
    for match in re.finditer(section_pattern, body, re.MULTILINE):
        section_name = match.group(1).strip().lower().replace(" ", "_")
        section_content = match.group(2).strip()
        result["sections"][section_name] = section_content

    # Extract acceptance criteria (checkboxes)
    checkbox_pattern = r"^\s*-\s*\[([ xX])\]\s*(.+)$"
    for match in re.finditer(checkbox_pattern, body, re.MULTILINE):
        checked = match.group(1).lower() == "x"
        text = match.group(2).strip()
        result["criteria"].append({"text": text, "checked": checked})

    # If no checkboxes, try numbered lists in "Acceptance Criteria" or "Criteria" section
    if not result["criteria"]:
        for section_name in ["acceptance_criteria", "criteria", "acceptance criteria"]:
            if section_name in result["sections"]:
                numbered = r"^\s*\d+\.\s*(.+)$"
                for match in re.finditer(numbered, result["sections"][section_name], re.MULTILINE):
                    result["criteria"].append({"text": match.group(1).strip(), "checked": False})
                break

    # Extract context (first section or first paragraph before any ## heading)
    first_heading = body.find("## ")
    if first_heading > 0:
        context_text = body[:first_heading].strip()
    else:
        context_text = body.split("\n\n")[0].strip()
    # Clean up: remove "## Context" or "## Problem" header if present
    context_text = re.sub(r"^#+\s*\w+\s*\n?", "", context_text).strip()
    result["context"] = context_text[:500]  # Cap at 500 chars

    return result


def fetch_issue_from_url(url: str) -> Dict[str, Any]:
    """Fetch an issue from a Gitea API URL and parse it."""
    import urllib.request
    req = urllib.request.Request(url, headers={"Accept": "application/json"})
    with urllib.request.urlopen(req) as resp:
        data = json.loads(resp.read())

    return parse_issue_body(
        body=data.get("body", ""),
        title=data.get("title", ""),
        labels=[l["name"] for l in data.get("labels", [])]
    )


def main():
    parser = argparse.ArgumentParser(description="Parse Gitea issue markdown into structured JSON")
    parser.add_argument("file", nargs="?", help="Issue body file (or use --stdin)")
    parser.add_argument("--stdin", action="store_true", help="Read from stdin")
    parser.add_argument("--url", help="Gitea API URL to fetch issue from")
    parser.add_argument("--title", default="", help="Issue title")
    parser.add_argument("--labels", nargs="*", default=[], help="Issue labels")
    parser.add_argument("--pretty", action="store_true", help="Pretty-print JSON output")

    args = parser.parse_args()

    if args.url:
        result = fetch_issue_from_url(args.url)
    elif args.stdin:
        body = sys.stdin.read()
        result = parse_issue_body(body, args.title, args.labels)
    elif args.file:
        with open(args.file) as f:
            body = f.read()
        result = parse_issue_body(body, args.title, args.labels)
    else:
        parser.print_help()
        sys.exit(1)

    indent = 2 if args.pretty else None
    print(json.dumps(result, indent=indent))


if __name__ == "__main__":
    main()
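For orientation, here is a minimal invocation of the parser above; the sample issue body is made up for illustration, and the output shown is what the code as written would emit:

$ printf 'Fix the widget.\n\n## Tasks\n\n- [ ] add test\n' | python3 scripts/gitea_issue_parser.py --stdin --title "Fix widget (#7)" --pretty
{
  "title": "Fix widget (#7)",
  "context": "Fix the widget.",
  "criteria": [
    {
      "text": "add test",
      "checked": false
    }
  ],
  "labels": [],
  "epic_ref": 7,
  "sections": {
    "tasks": "- [ ] add test"
  }
}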
276
scripts/session_metadata.py
Normal file
@@ -0,0 +1,276 @@
#!/usr/bin/env python3
"""
session_metadata.py - Extract structured metadata from Hermes session transcripts.
Works alongside session_reader.py to provide higher-level session analysis.
"""

import json
import re
import sys
from dataclasses import dataclass, asdict
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Any

# Import from session_reader (the canonical reader)
from session_reader import read_session


@dataclass
class SessionSummary:
    """Structured summary of a Hermes session transcript."""
    session_id: str
    model: str
    repo: str
    outcome: str
    message_count: int
    tool_calls: int
    duration_estimate: str
    key_actions: List[str]
    errors_encountered: List[str]
    start_time: Optional[str] = None
    end_time: Optional[str] = None
    total_tokens_estimate: int = 0
    user_messages: int = 0
    assistant_messages: int = 0
    tool_outputs: int = 0


def extract_session_metadata(file_path: str) -> SessionSummary:
    """
    Extract structured metadata from a Hermes session JSONL transcript.
    Uses session_reader.read_session() for file reading.
    """
    session_id = Path(file_path).stem
    messages = []
    model = "unknown"
    repo = "unknown"
    tool_calls_count = 0
    key_actions = []
    errors = []
    start_time = None
    end_time = None
    total_tokens = 0

    # Common repo patterns to look for
    repo_patterns = [
        r"(?:the-nexus|compounding-intelligence|timmy-config|hermes-agent)",
        r"(?:forge\.alexanderwhitestone\.com/([^/]+/[^/\s]+))",
        r"(?:github\.com/([^/]+/[^/\s]+))",
        r"(?:Timmy_Foundation/([^/\s]+))",
    ]

    try:
        # Use the canonical reader from session_reader.py
        messages = read_session(file_path)
    except FileNotFoundError:
        return SessionSummary(
            session_id=session_id,
            model="unknown",
            repo="unknown",
            outcome="failure",
            message_count=0,
            tool_calls=0,
            duration_estimate="0m",
            key_actions=[],
            errors_encountered=[f"File not found: {file_path}"]
        )

    # Process messages for metadata
    for entry in messages:
        # Extract model from assistant messages
        if entry.get("role") == "assistant" and entry.get("model"):
            model = entry["model"]

        # Extract timestamps
        if entry.get("timestamp"):
            ts = entry["timestamp"]
            if start_time is None:
                start_time = ts
            end_time = ts

        # Count tool calls
        if entry.get("tool_calls"):
            tool_calls_count += len(entry["tool_calls"])
            for tc in entry["tool_calls"]:
                if tc.get("function", {}).get("name"):
                    action = f"{tc['function']['name']}"
                    if action not in key_actions:
                        key_actions.append(action)

        # Estimate tokens from content length
        content = entry.get("content", "")
        if isinstance(content, str):
            total_tokens += len(content.split())
        elif isinstance(content, list):
            for item in content:
                if isinstance(item, dict) and "text" in item:
                    total_tokens += len(item["text"].split())

        # Look for repo mentions in content
        if entry.get("content"):
            content_str = str(entry["content"])
            for pattern in repo_patterns:
                match = re.search(pattern, content_str, re.IGNORECASE)
                if match:
                    if match.groups():
                        repo = match.group(1)
                    else:
                        repo = match.group(0)
                    break

        # Look for error messages
        if entry.get("role") == "tool" and entry.get("is_error"):
            error_msg = entry.get("content", "Unknown error")
            if isinstance(error_msg, str) and len(error_msg) < 200:
                errors.append(error_msg[:200])

    # Count message types
    user_messages = sum(1 for m in messages if m.get("role") == "user")
    assistant_messages = sum(1 for m in messages if m.get("role") == "assistant")
    tool_outputs = sum(1 for m in messages if m.get("role") == "tool")

    # Calculate duration estimate
    duration_estimate = "unknown"
    if start_time and end_time:
        try:
            # Try to parse timestamps
            start_dt = None
            end_dt = None

            # Handle various timestamp formats
            for fmt in ["%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d %H:%M:%S"]:
                try:
                    if start_dt is None:
                        start_dt = datetime.strptime(start_time, fmt)
                    if end_dt is None:
                        end_dt = datetime.strptime(end_time, fmt)
                except ValueError:
                    continue

            if start_dt and end_dt:
                duration = end_dt - start_dt
                minutes = duration.total_seconds() / 60
                duration_estimate = f"{minutes:.0f}m"
        except Exception:
            pass

    # Classify outcome
    outcome = "unknown"
    if errors:
        # Check if any errors are fatal
        fatal_errors = any("405" in e or "permission" in e.lower() or "authentication" in e.lower()
                           for e in errors)
        if fatal_errors:
            outcome = "failure"
        else:
            outcome = "partial"
    elif messages:
        # Check last message for success indicators
        last_msg = messages[-1]
        if last_msg.get("role") == "assistant":
            content = last_msg.get("content", "")
            if isinstance(content, str):
                success_indicators = ["done", "completed", "success", "merged", "pushed"]
                if any(indicator in content.lower() for indicator in success_indicators):
                    outcome = "success"
                else:
                    outcome = "unknown"

    # Deduplicate key actions (keep unique, limit to 10)
    unique_actions = []
    for action in key_actions:
        if action not in unique_actions:
            unique_actions.append(action)
        if len(unique_actions) >= 10:
            break

    # Deduplicate errors (keep unique, limit to 5)
    unique_errors = []
    for error in errors:
        if error not in unique_errors:
            unique_errors.append(error)
        if len(unique_errors) >= 5:
            break

    return SessionSummary(
        session_id=session_id,
        model=model,
        repo=repo,
        outcome=outcome,
        message_count=len(messages),
        tool_calls=tool_calls_count,
        duration_estimate=duration_estimate,
        key_actions=unique_actions,
        errors_encountered=unique_errors,
        start_time=start_time,
        end_time=end_time,
        total_tokens_estimate=total_tokens,
        user_messages=user_messages,
        assistant_messages=assistant_messages,
        tool_outputs=tool_outputs
    )


def process_session_directory(directory_path: str, output_file: Optional[str] = None) -> List[SessionSummary]:
    """
    Process all JSONL files in a directory.
    """
    directory = Path(directory_path)
    if not directory.exists():
        print(f"Error: Directory {directory_path} does not exist", file=sys.stderr)
        return []

    jsonl_files = list(directory.glob("*.jsonl"))
    if not jsonl_files:
        print(f"Warning: No JSONL files found in {directory_path}", file=sys.stderr)
        return []

    summaries = []
    for jsonl_file in sorted(jsonl_files):
        print(f"Processing {jsonl_file.name}...", file=sys.stderr)
        summary = extract_session_metadata(str(jsonl_file))
        summaries.append(summary)

    if output_file:
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump([asdict(s) for s in summaries], f, indent=2)
        print(f"Wrote {len(summaries)} summaries to {output_file}", file=sys.stderr)

    return summaries


def main():
    """CLI entry point."""
    import argparse

    parser = argparse.ArgumentParser(description="Extract metadata from Hermes session JSONL transcripts")
    parser.add_argument("path", help="Path to JSONL file or directory of session files")
    parser.add_argument("-o", "--output", help="Output JSON file (default: stdout)")
    parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")

    args = parser.parse_args()

    path = Path(args.path)

    if path.is_file():
        summary = extract_session_metadata(str(path))
        if args.output:
            with open(args.output, 'w') as f:
                json.dump(asdict(summary), f, indent=2)
            print(f"Wrote summary to {args.output}", file=sys.stderr)
        else:
            print(json.dumps(asdict(summary), indent=2))

    elif path.is_dir():
        summaries = process_session_directory(str(path), args.output)
        if not args.output:
            print(json.dumps([asdict(s) for s in summaries], indent=2))

    else:
        print(f"Error: {args.path} is not a file or directory", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
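session_metadata.py leans on session_reader.read_session(), which is not part of this diff. For local experimentation, a minimal stand-in, under the assumption that transcripts are one JSON object per line, might look like the sketch below; the canonical implementation in session_reader.py may differ:

# Hypothetical stand-in for session_reader.read_session(); the real reader
# is not shown in this diff. Assumes one JSON message dict per line.
import json

def read_session(file_path: str) -> list:
    """Read a JSONL transcript into a list of message dicts."""
    messages = []
    # open() raises FileNotFoundError, which extract_session_metadata() catches
    with open(file_path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                messages.append(json.loads(line))
    return messages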
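Typical invocations of the new script, matching its argparse definition (the transcript paths below are hypothetical):

# Summarize a single transcript to stdout
python3 scripts/session_metadata.py sessions/abc123.jsonl

# Summarize every *.jsonl in a directory and write one JSON array
python3 scripts/session_metadata.py sessions/ -o summaries.json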
@@ -1,109 +0,0 @@
#!/usr/bin/env python3
"""Tests for scripts/gitea_issue_parser.py"""

import sys
import os
sys.path.insert(0, os.path.dirname(__file__) or ".")

# Import from sibling
import importlib.util
spec = importlib.util.spec_from_file_location("parser", os.path.join(os.path.dirname(__file__) or ".", "gitea_issue_parser.py"))
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
parse_issue_body = mod.parse_issue_body


def test_basic_parsing():
    body = """## Context

This is the background info.

## Acceptance Criteria

- [ ] First criterion
- [x] Second criterion (done)

## What to build

Some description.
"""
    result = parse_issue_body(body, title="Test (#42)", labels=["bug"])
    assert result["title"] == "Test (#42)"
    assert result["labels"] == ["bug"]
    assert result["epic_ref"] == 42
    assert len(result["criteria"]) == 2
    assert result["criteria"][0]["text"] == "First criterion"
    assert result["criteria"][0]["checked"] == False
    assert result["criteria"][1]["checked"] == True
    assert "context" in result["sections"]
    print("PASS: test_basic_parsing")


def test_numbered_criteria():
    body = """## Acceptance Criteria

1. First item
2. Second item
3. Third item
"""
    result = parse_issue_body(body)
    assert len(result["criteria"]) == 3
    assert result["criteria"][0]["text"] == "First item"
    print("PASS: test_numbered_criteria")


def test_epic_ref_from_body():
    body = "Closes #123\n\nSome description."
    result = parse_issue_body(body)
    assert result["epic_ref"] == 123
    print("PASS: test_epic_ref_from_body")


def test_empty_body():
    result = parse_issue_body("")
    assert result["criteria"] == []
    assert result["context"] == ""
    assert result["sections"] == {}
    print("PASS: test_empty_body")


def test_no_sections():
    body = "Just a plain issue body with no headings."
    result = parse_issue_body(body)
    assert result["context"] == "Just a plain issue body with no headings."
    print("PASS: test_no_sections")


def test_multiple_sections():
    body = """## Problem

Something is broken.

## Fix

Do this instead.

## Notes

Additional info.
"""
    result = parse_issue_body(body)
    assert "problem" in result["sections"]
    assert "fix" in result["sections"]
    assert "notes" in result["sections"]
    assert "Something is broken" in result["sections"]["problem"]
    print("PASS: test_multiple_sections")


def run_all():
    test_basic_parsing()
    test_numbered_criteria()
    test_epic_ref_from_body()
    test_empty_body()
    test_no_sections()
    test_multiple_sections()
    print("\nAll 6 tests passed!")


if __name__ == "__main__":
    run_all()
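Running the deleted test module directly executed run_all(); per its print statements, a clean run produced:

PASS: test_basic_parsing
PASS: test_numbered_criteria
PASS: test_epic_ref_from_body
PASS: test_empty_body
PASS: test_no_sections
PASS: test_multiple_sections

All 6 tests passed!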